@@ -50,6 +50,7 @@
     bitwise_and,
     bitwise_or,
     bitwise_xor,
+    cast,
     conj,
     cosh,
     deg2rad,
@@ -124,6 +125,7 @@
     dvector,
     fmatrices,
     fmatrix,
+    fscalar,
     ftensor4,
     fvector,
     imatrices,
@@ -4069,25 +4071,36 @@ def test_exp_over_1_plus_exp(self):
 
     def test_local_1msigmoid(self):
         m = self.get_mode(excluding=["fusion", "inplace"])
-        x = fmatrix()
+        x = fscalar()
+        xd = dscalar()
 
         # Test `exp_over_1_plus_exp`
         f = pytensor.function([x], 1 - exp(x) / (1 + exp(x)), mode=m)
         # FIXME: PatternNodeRewriter does not copy stack trace
         # (see https://github.com/Theano/Theano/issues/4581)
         # assert check_stack_trace(f, ops_to_check=[neg, sigmoid])
-        assert [node.op for node in f.maker.fgraph.toposort()] == [neg, sigmoid]
+        assert equal_computations(f.maker.fgraph.outputs, [sigmoid(-x)])
 
         # Test `inv_1_plus_exp`
         f = pytensor.function([x], 1 - pt.fill(x, 1.0) / (1 + exp(-x)), mode=m)
         # assert check_stack_trace(f, ops_to_check=[neg, sigmoid])
-        assert [node.op for node in f.maker.fgraph.toposort()] == [neg, sigmoid]
+        assert equal_computations(f.maker.fgraph.outputs, [sigmoid(-x)])
 
         # Test float constant
-        f = pytensor.function(
-            [x], np.array(1.000001, dtype="float32") - sigmoid(x), mode=m
-        )
-        assert [node.op for node in f.maker.fgraph.toposort()] == [neg, sigmoid]
+        for out, expected in [
+            (np.array(1.0, "float32") - sigmoid(x), sigmoid(-x)),
+            (np.array(1.0, "float64") - pt.sigmoid(x), cast(sigmoid(-x), "float64")),
+            (np.array(1.0, "float32") - sigmoid(xd), sigmoid(-xd)),
+            (np.array(1.0, "float64") - sigmoid(xd), sigmoid(-xd)),
+            (np.sum(1 / np.array([2, 3, 6], "float32")) - sigmoid(x), sigmoid(-x)),
+            (np.sum(1 / np.array([2, 3, 6], "float64")) - sigmoid(xd), sigmoid(-xd)),
+            (np.float32(1 - 9e-6) - sigmoid(x), np.float32(1 - 9e-6) - sigmoid(x)),
+            (np.float64(1 - 1e-9) - sigmoid(xd), np.float64(1 - 1e-9) - sigmoid(xd)),
+        ]:
+            rewritten = rewrite_graph(
+                out, include=["canonicalize", "specialize", "stabilize"]
+            )
+            utt.assert_equal_computations([rewritten], [expected], original=out)
 
     def test_local_sigm_times_exp(self):
         """
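The `local_1msigmoid` rewrite exercised above replaces `1 - sigmoid(x)` with `sigmoid(-x)`, which avoids catastrophic cancellation once `sigmoid(x)` rounds to 1. A minimal standalone sketch of observing this, assuming the default compilation mode applies the `stabilize` and `specialize` passes the test relies on:

import pytensor
import pytensor.tensor as pt

x = pt.dscalar("x")
# Under the default mode the graph should be rewritten to sigmoid(-x),
# so a large input stays non-zero instead of rounding down to 0.0.
f = pytensor.function([x], 1 - pt.sigmoid(x))
print(f(40.0))  # ~4.2e-18 with the rewrite; naive evaluation in float64 would give 0.0
pytensor.printing.debugprint(f)  # the printed graph should show sigmoid applied to a negated input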
@@ -4235,7 +4248,8 @@ def test_log1msigm_to_softplus(self):
         f(np.random.random((54, 11)).astype(config.floatX))
 
         # Test close to 1
-        out = log(1.000001 - sigmoid(x))
+        x_dtype = np.dtype(x.dtype).type
+        out = log(np.nextafter(x_dtype(1), x_dtype(2)) - sigmoid(x))
         f = pytensor.function([x], out, mode=self.m)
         topo = f.maker.fgraph.toposort()
         assert len(topo) == 2
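In the updated "close to 1" check, `np.nextafter(x_dtype(1), x_dtype(2))` yields the smallest representable value strictly greater than 1 in the variable's own dtype, so the constant is "close to 1" at the precision of `x` rather than at the hard-coded 1.000001. A quick NumPy illustration of the gap this produces:

import numpy as np

for dtype in (np.float32, np.float64):
    one = dtype(1)
    above = np.nextafter(one, dtype(2))  # smallest float strictly greater than 1 in this dtype
    print(dtype.__name__, above - one)   # ~1.19e-07 for float32, ~2.22e-16 for float64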