Commit 60ba7c7

Remove InRange scalar Op (#1699)
* Initial plan

* Remove InRange scalar op and add missing __rtruediv__ and __rfloordiv__

* Remove __rtruediv__ and __rfloordiv__ changes per review feedback

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
1 parent b9af952 commit 60ba7c7

File tree

3 files changed: 0 additions & 79 deletions

pytensor/compile/profiling.py

Lines changed: 0 additions & 1 deletion
@@ -1464,7 +1464,6 @@ def print_tips(self, file):
             ps.GE,
             ps.EQ,
             ps.NEQ,
-            ps.InRange,
             ps.Switch,
             ps.OR,
             ps.XOR,

pytensor/scalar/basic.py

Lines changed: 0 additions & 50 deletions
@@ -1650,56 +1650,6 @@ def c_code_cache_version(self):
 isinf = IsInf()
 
 
-class InRange(LogicalComparison):
-    nin = 3
-
-    def __init__(self, openlow, openhi):
-        self.openlow = openlow
-        self.openhi = openhi
-
-    def impl(self, x, low, hi):
-        if self.openlow and x <= low:
-            return False
-        elif not self.openlow and x < low:
-            return False
-        if self.openhi and x >= hi:
-            return False
-        elif not self.openhi and x > hi:
-            return False
-        return True
-
-    def c_code(self, node, name, inputs, outputs, sub):
-        (x, low, hi) = inputs
-        (z,) = outputs
-
-        cmp1 = ">" if self.openlow else ">="
-        cmp2 = "<" if self.openhi else "<="
-
-        return f"{z} = {x} {cmp1} {low} && {x} {cmp2} {hi};"
-
-    def get_grad(self, elem):
-        if elem.type in complex_types:
-            msg = (
-                "No gradient implemented for complex numbers in "
-                "class scalar.basic.InRange"
-            )
-            raise NotImplementedError(msg)
-        elif elem.type in discrete_types:
-            return elem.zeros_like(dtype=config.floatX)
-        else:
-            return elem.zeros_like()
-
-    def L_op(self, inputs, outputs, gout):
-        (x, low, hi) = inputs
-        (_gz,) = gout
-        grads = [self.get_grad(elem) for elem in [x, low, hi]]
-        return grads
-
-
-inopenrange = InRange(True, True)
-inclosedrange = InRange(False, False)
-
-
 class Switch(ScalarOp):
     nin = 3
     nfunc_spec = ("where", 3, 1)

tests/scalar/test_basic.py

Lines changed: 0 additions & 28 deletions
@@ -3,15 +3,13 @@
 
 import pytensor
 import pytensor.tensor as pt
-import tests.unittest_tools as utt
 from pytensor.compile.mode import Mode
 from pytensor.graph.fg import FunctionGraph
 from pytensor.link.c.basic import DualLinker
 from pytensor.scalar.basic import (
     EQ,
     ComplexError,
     Composite,
-    InRange,
     ScalarType,
     add,
     and_,
@@ -475,32 +473,6 @@ def test_grad_identity():
     pytensor.gradient.grad(l, x)
 
 
-def test_grad_inrange():
-    for bound_definition in [(True, True), (False, False)]:
-        # Instantiate op, and then take the gradient
-        op = InRange(*bound_definition)
-        x = fscalar("x")
-        low = fscalar("low")
-        high = fscalar("high")
-        out = op(x, low, high)
-        gx, glow, ghigh = pytensor.gradient.grad(out, [x, low, high])
-
-        # We look if the gradient are equal to zero
-        # if x is lower than the lower bound,
-        # equal to the lower bound, between lower and higher bound,
-        # equal to the higher bound and higher than the higher
-        # bound.
-        # Mathematically we should have an infinite gradient when
-        # x is equal to the lower or higher bound but in that case
-        # PyTensor defines the gradient to be zero for stability.
-        f = pytensor.function([x, low, high], [gx, glow, ghigh])
-        utt.assert_allclose(f(0, 1, 5), [0, 0, 0])
-        utt.assert_allclose(f(1, 1, 5), [0, 0, 0])
-        utt.assert_allclose(f(2, 1, 5), [0, 0, 0])
-        utt.assert_allclose(f(5, 1, 5), [0, 0, 0])
-        utt.assert_allclose(f(7, 1, 5), [0, 0, 0])
-
-
 def test_grad_abs():
     a = fscalar("a")
     b = 0.5 * (a + pytensor.tensor.abs(a))
0 commit comments