@@ -35,7 +35,7 @@ class StringOpsTests(Tf2OnnxBackendTestBase):
 
     @requires_custom_ops("StringRegexReplace")
     def test_static_regex_replace(self):
-        text_val = np.array([["Hello world!", "Test 1 2 3"], ["Hi there", "test test"]], dtype=np.str)
+        text_val = np.array([["Hello world!", "Test 1 2 3"], ["Hi there", "test test"]], dtype=str)
         def func(text):
             x_ = tf.strings.regex_replace(text, " ", "_", replace_global=True)
             return tf.identity(x_, name=_TFOUTPUT)
@@ -44,9 +44,9 @@ def func(text):
     @requires_custom_ops("StringJoin")
     @check_opset_min_version(8, "Expand")
     def test_string_join(self):
-        text_val1 = np.array([["a", "Test 1 2 3"], ["Hi there", "test test"]], dtype=np.str)
-        text_val2 = np.array([["b", "Test 1 2 3"], ["Hi there", "suits ♠♣♥♦"]], dtype=np.str)
-        text_val3 = np.array("Some scalar text", dtype=np.str)
+        text_val1 = np.array([["a", "Test 1 2 3"], ["Hi there", "test test"]], dtype=str)
+        text_val2 = np.array([["b", "Test 1 2 3"], ["Hi there", "suits ♠♣♥♦"]], dtype=str)
+        text_val3 = np.array("Some scalar text", dtype=str)
         def func(text1, text2, text3):
             x_ = tf.strings.join([text1, text2, text3], separator="±")
             return tf.identity(x_, name=_TFOUTPUT)
@@ -55,7 +55,7 @@ def func(text1, text2, text3):
     @requires_custom_ops("StringSplit")
     @check_tf_min_version("2.0", "result is sparse not ragged in tf1")
     def test_string_split(self):
-        text_val = np.array([["a", "Test 1 2 3"], ["Hi there", "test test"]], dtype=np.str)
+        text_val = np.array([["a", "Test 1 2 3"], ["Hi there", "test test"]], dtype=str)
         def func(text):
             x = tf.strings.split(text, sep=' ').flat_values
             x_ = tf.identity(x, name=_TFOUTPUT)
@@ -64,7 +64,7 @@ def func(text):
 
     @requires_custom_ops("StringToHashBucketFast")
     def test_string_to_hash_bucket_fast(self):
-        text_val = np.array([["a", "Test 1 2 3", "♠♣"], ["Hi there", "test test", "♥♦"]], dtype=np.str)
+        text_val = np.array([["a", "Test 1 2 3", "♠♣"], ["Hi there", "test test", "♥♦"]], dtype=str)
         def func(text):
             x = tf.strings.to_hash_bucket_fast(text, 20)
             x_ = tf.identity(x, name=_TFOUTPUT)
@@ -73,8 +73,8 @@ def func(text):
 
     @requires_custom_ops("StringEqual")
     def test_string_equal(self):
-        text_val1 = np.array([["a", "Test 1 2 3", "♠♣"], ["Hi there", "test test", "♥♦"]], dtype=np.str)
-        text_val2 = np.array([["a", "Test 2 4 6", "♠♣"], ["Hello", "test test", "♥ ♦"]], dtype=np.str)
+        text_val1 = np.array([["a", "Test 1 2 3", "♠♣"], ["Hi there", "test test", "♥♦"]], dtype=str)
+        text_val2 = np.array([["a", "Test 2 4 6", "♠♣"], ["Hello", "test test", "♥ ♦"]], dtype=str)
         def func(text1, text2):
             x = tf.equal(text1, text2)
             x_ = tf.identity(x, name=_TFOUTPUT)
@@ -83,8 +83,8 @@ def func(text1, text2):
 
     @requires_custom_ops("StringNotEqual")
     def test_string_not_equal(self):
-        text_val1 = np.array([["a", "Test 1 2 3", "♠♣"], ["Hi there", "test test", "♥♦"]], dtype=np.str)
-        text_val2 = np.array([["a", "Test 2 4 6", "♠♣"], ["Hello", "test test", "♥ ♦"]], dtype=np.str)
+        text_val1 = np.array([["a", "Test 1 2 3", "♠♣"], ["Hi there", "test test", "♥♦"]], dtype=str)
+        text_val2 = np.array([["a", "Test 2 4 6", "♠♣"], ["Hello", "test test", "♥ ♦"]], dtype=str)
         def func(text1, text2):
             x = tf.not_equal(text1, text2)
             x_ = tf.identity(x, name=_TFOUTPUT)
@@ -116,7 +116,7 @@ def test_regex_split_with_offsets(self):
         from tensorflow_text.python.ops.regex_split_ops import (
             gen_regex_split_ops as lib_gen_regex_split_ops)
         text_val = np.array(["a Test 1 2 3 ♠♣",
-                             "Hi there test test ♥♦"], dtype=np.str)
+                             "Hi there test test ♥♦"], dtype=str)
         def func(text):
             tokens, begin_offsets, end_offsets, row_splits = lib_gen_regex_split_ops.regex_split_with_offsets(
                 text, "(\\s)", "")
@@ -153,7 +153,7 @@ def _CreateTable(vocab, num_oov=1):
                 init, num_oov, lookup_key_dtype=tf.string)
 
         vocab = _CreateTable(["great", "they", "the", "##'", "##re", "##est"])
-        text_val = np.array(["they're", "the", "greatest"], dtype=np.str)
+        text_val = np.array(["they're", "the", "greatest"], dtype=str)
 
         def func(text):
             inputs = ragged_tensor.convert_to_tensor_or_ragged_tensor(text)
0 commit comments