Skip to content

Commit d669913

Browse files
authored
fix o-s class on OrangePi (#2227)
1 parent 408ce34 commit d669913

File tree

4 files changed

+26
-13
lines changed

4 files changed

+26
-13
lines changed

mindtorch/_apis/cpu.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1255,4 +1255,7 @@ def cumprod(input, dim, dtype):
12551255
return out
12561256

12571257
def lerp(input, end, weight):
    """Linear interpolation of `input` toward `end` by `weight`.

    Thin CPU dispatch wrapper: the computation (input + weight * (end - input))
    is delegated entirely to the legacy kernel. `weight` may be a scalar or a
    tensor — presumably broadcast by the kernel; TODO confirm against
    legacy.lerp.
    """
    interpolated = legacy.lerp(input, end, weight)
    return interpolated
1259+
1260+
def smooth_l1_loss(input, target, beta=1.0, reduction='none'):
    """Smooth-L1 (Huber-style) loss between `input` and `target`, CPU path.

    `beta` is the quadratic/linear transition point and `reduction` is one of
    'none'/'mean'/'sum' — presumably validated by the legacy kernel; TODO
    confirm. All work is delegated to legacy.smooth_l1_loss.
    """
    loss = legacy.smooth_l1_loss(input, target, beta, reduction)
    return loss

mindtorch/_apis/npu.py

Lines changed: 19 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -804,7 +804,7 @@ def index(input, index):
804804
def scatter(input, dim, index, src):
    """Scatter `src` values into `input` along `dim` at positions `index`.

    Fast path uses the pyboost op. On OrangePi boards (ON_ORANGE_PI) that op
    is skipped and the legacy tensor_scatter_elements kernel is used instead;
    `src` is cast to input's dtype first — presumably because the legacy
    kernel requires matching dtypes (TODO confirm).
    """
    if use_pyboost() and not ON_ORANGE_PI:
        return pyboost.scatter_op(input, dim, index, src)
    src_matched = cast(src, input.dtype)
    return legacy.tensor_scatter_elements(input, index, src_matched, dim, "none")
808808

809809
def tril(input, diagonal=0):
810810
if use_pyboost():
@@ -858,7 +858,8 @@ def isinf(input):
858858
def sort(input, dim, descending, stable):
    """Sort `input` along `dim`, returning (values, indices).

    Fast path uses the pyboost op (honors `stable`); the legacy fallback used
    on OrangePi ignores `stable` and returns indices in a narrower dtype, so
    they are normalized to int64 to match the pyboost path. Assumes the
    legacy result is an indexable (values, indices) pair — TODO confirm.
    """
    if use_pyboost() and not ON_ORANGE_PI:
        return pyboost.sort_ext_op(input, dim, descending, stable)
    sorted_pair = legacy.sort(input, dim, descending)
    return sorted_pair[0], cast(sorted_pair[1], mindspore.int64)
862863

863864
def prod(input, axis, keepdims, dtype):
864865
if use_pyboost():
@@ -1612,9 +1613,15 @@ def inplace_add(input, other, alpha):
16121613
return legacy.inplace_add(input, other)
16131614

16141615
def logsumexp(input, dim, keepdim):
    """Compute log(sum(exp(input))) over `dim`, numerically stabilized.

    pyboost fast path when available (and not on OrangePi); otherwise uses
    the max-shift identity log(sum(exp(x))) = max + log(sum(exp(x - max)))
    to avoid overflow in exp for large inputs.
    """
    if use_pyboost() and not ON_ORANGE_PI:
        return pyboost.logsumexp_op(input, dim, keepdim)
    # keep_dims=True so the max broadcasts against `input` in sub().
    shift = legacy.reduce_max(input, dim, True)
    shifted_exp = exp(sub(input, shift))
    # NOTE(review): `sum` here is the module-level 4-arg reduction helper,
    # not the Python builtin — confirm it is in scope in this module.
    total = sum(shifted_exp, dim, keepdim, None)
    log_total = log(total)
    if not keepdim:
        # Drop the kept dim on the shift so shapes line up with log_total.
        shift = squeeze(shift, dim)
    return add(log_total, shift)
16181625

16191626
def ctc_loss(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity):
16201627
loss, log_alpha = legacy.ctc_loss_v2(log_probs, targets, input_lengths, target_lengths, blank, 'none', zero_infinity)
@@ -1922,9 +1929,11 @@ def linalg_qr(input_x, mode):
19221929

19231930
def bernoulli(input, generator):
    """Draw Bernoulli samples with per-element probabilities given by `input`.

    `generator._step(12)` advances the RNG state and yields the (seed, offset)
    pair consumed by the pyboost kernel. On OrangePi the pyboost op is skipped
    and samples are derived manually: a uniform draw u in [0, 1) falls below p
    with probability p, so less(u, p) is a Bernoulli(p) sample.
    """
    seed, offset = generator._step(12)  # pylint: disable=protected-access
    if use_pyboost() and not ON_ORANGE_PI:
        return pyboost.bernoulli_ext_op(input, seed, offset)
    uniform_draw = rand_like(input, generator, input.dtype)
    return cast(less(uniform_draw, input), input.dtype)
19281937

19291938
def multinomial(input, num_samples, replacement, generator):
19301939
seed, offset = generator._step(12) # pylint: disable=protected-access
@@ -1998,4 +2007,7 @@ def replication_pad_1d(input, padding):
19982007
return pyboost.reflection_pad_1d_op(input, padding)
19992008

20002009
def hardtanh(input, min_val, max_val):
    """Clamp `input` elementwise into [min_val, max_val].

    Direct dispatch to the pyboost kernel; no legacy fallback exists for this
    op in the visible code.
    """
    clamped = pyboost.hardtanh_op(input, min_val, max_val)
    return clamped
2011+
2012+
def smooth_l1_loss(input, target, beta=1.0, reduction='none'):
    """Smooth-L1 (Huber-style) loss between `input` and `target`, NPU path.

    `beta` is the quadratic/linear transition point; `reduction` is one of
    'none'/'mean'/'sum' — presumably validated by the pyboost implementation;
    TODO confirm.
    """
    loss = pyboost.smooth_l1_loss_impl(input, target, beta, reduction)
    return loss

mindtorch/nn/functional.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -556,9 +556,7 @@ def l1_loss(input, target, reduction='mean'):
556556
return execute('l1_loss', input, target, reduction)
557557

558558
def smooth_l1_loss(input, target, beta=1.0, reduction='none'):
    """Functional smooth-L1 loss, routed through the backend dispatcher.

    NOTE(review): the default reduction here is 'none', unlike torch's
    F.smooth_l1_loss default of 'mean' — presumably intentional for this
    project; confirm against callers. Dtype handling (formerly a float32
    upcast) is now left to the per-device backend.
    """
    return execute('smooth_l1_loss', input, target, beta, reduction)
562560

563561
def kl_div(input, target, reduction='mean', log_target=False):
564562
if reduction == 'batchmean':

mindtorch/ops/array.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -548,7 +548,7 @@ def _record_tensor_index(index, remain_indexes, dim, device):
548548

549549
while dim > len(remain_indexes):
550550
# use empty_tensor with dim_num 9 to indicate unused dim
551-
if device.type == 'npu':
551+
if device.type == 'npu' and not ON_ORANGE_PI:
552552
remain_indexes.append(empty_tensor_9d)
553553
else:
554554
remain_indexes.append(slice(None, None, None))
@@ -650,7 +650,7 @@ def tensor_getitem(self, index):
650650
if not remain_indexes:
651651
return self_viewed
652652

653-
if self.device.type == 'npu':
653+
if self.device.type == 'npu' and not ON_ORANGE_PI:
654654
return execute('index', self_viewed, remain_indexes)
655655

656656
return getitem(self_viewed, tuple(remain_indexes) if len(remain_indexes) > 1 else remain_indexes[0])

0 commit comments

Comments (0)