
Commit 6284782

Authored by alealv (Alejandro Gaston Alvarez Franceschi)
Fix typos (#2018)
* Fix typos
* Fix lambda variable
* Use only ascii characters

---------

Co-authored-by: Alejandro Gaston Alvarez Franceschi <alejandro.alvarez@projectx.ai>
1 parent b2f7190 commit 6284782

123 files changed, +226 -225 lines


coremltools/_deps/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@
 # found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause

 """
-List of all external dependancies for this package. Imported as
+List of all external dependencies for this package. Imported as
 optional includes
 """
 import platform as _platform
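
The docstring fixed here describes optional external dependencies. A minimal sketch of the optional-import pattern such a module typically uses; the `_HAS_SKLEARN` flag name is illustrative and not necessarily the exact flag defined in coremltools._deps:

# Illustrative optional-dependency check; the flag name is an assumption.
_HAS_SKLEARN = True
try:
    import sklearn  # optional dependency
except ImportError:
    _HAS_SKLEARN = False

def require_sklearn():
    # Raise a clear error only when the optional feature is actually used.
    if not _HAS_SKLEARN:
        raise ImportError("scikit-learn is required for this converter.")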

coremltools/converters/_converters_entry.py

Lines changed: 1 addition & 1 deletion
@@ -163,7 +163,7 @@ def convert(
 in the TF model.
 - If ``name`` is specified with ``TensorType`` and ``ImageType``, it
 must correspond to a placeholder op in the TF graph. The input names
-in the converted Core ML model can later be modifed using the
+in the converted Core ML model can later be modified using the
 ``ct.utils.rename_feature`` API.
 - If ``dtype`` is not specified, it defaults to the ``dtype`` of the
 inputs in the TF model.
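
The docstring refers to ``ct.utils.rename_feature``. A minimal sketch of renaming a converted model's input after the fact; the model path and feature names are hypothetical:

import coremltools as ct

# Hypothetical converted model and input name, for illustration only.
mlmodel = ct.models.MLModel("converted_model.mlmodel")
spec = mlmodel.get_spec()
ct.utils.rename_feature(spec, "input_1", "image")  # rename the TF-derived input
mlmodel = ct.models.MLModel(spec)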

coremltools/converters/libsvm/_libsvm_converter.py

Lines changed: 2 additions & 2 deletions
@@ -77,7 +77,7 @@ def convert(libsvm_model, feature_names, target, input_length, probability):
 # input will be a single array
 if input_length == "auto":
 print(
-"[WARNING] Infering an input length of %d. If this is not correct,"
+"[WARNING] Inferring an input length of %d. If this is not correct,"
 " use the 'input_length' parameter." % inferred_length
 )
 input_length = inferred_length
@@ -167,7 +167,7 @@ def convert(libsvm_model, feature_names, target, input_length, probability):
 else:
 svm.rho = libsvm_model.rho[0]

-# set coefficents
+# set coefficients
 if svm_type_enum == _svm.C_SVC or svm_type_enum == _svm.NU_SVC:
 for _ in range(nr_class - 1):
 svm.coefficients.add()
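
The first hunk's warning fires when the converter has to infer the input length. A hedged sketch of passing it explicitly through the public libsvm entry point; the model path is hypothetical, and passing a file path assumes the converter accepts a saved libsvm model file as its documentation describes:

import coremltools

# Hypothetical saved libsvm model; input_length given explicitly so the
# converter does not have to infer it (and print the warning above).
coreml_model = coremltools.converters.libsvm.convert(
    "svm_model.libsvm", input_length=13
)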

coremltools/converters/mil/backend/mil/load.py

Lines changed: 1 addition & 1 deletion
@@ -226,7 +226,7 @@ def convert_function(function, parameters, blob_writer, opset):
 return pm.Function(inputs=inputs, opset=opset, block_specializations={opset: block})

 # Add a classify op to the output.
-# Replaces the original probabilites output (in the containing MIL block)
+# Replaces the original probabilities output (in the containing MIL block)
 # with the outputs of the classifier op. Returns the name of the original
 # probabilities output variable.
 def _add_classify_op(prog, classifier_config):
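
The comment describes the classify op appended when a classifier is requested. A minimal sketch of the user-facing side, where passing a ``ClassifierConfig`` to ``ct.convert`` is what triggers this path; the tiny Keras model and labels are placeholders and TF2 is assumed:

import tensorflow as tf
import coremltools as ct

# A tiny stand-in classifier so the example is self-contained.
keras_model = tf.keras.Sequential(
    [tf.keras.layers.Dense(3, activation="softmax", input_shape=(4,))]
)

mlmodel = ct.convert(
    keras_model,
    convert_to="mlprogram",
    # Adds the classify op over the model's probabilities output.
    classifier_config=ct.ClassifierConfig(["cat", "dog", "fish"]),
)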

coremltools/converters/mil/backend/mil/passes/fuse_pow2_sqrt.py

Lines changed: 1 addition & 1 deletion
@@ -10,7 +10,7 @@
 def _match_pattern(op):
 pow_op, sqrt_op = None, None

-# check the curernt op is pow(2) or sqrt
+# check the current op is pow(2) or sqrt
 if op.op_type == "pow" and op.y.val == 2:
 pow_op = op
 if op.op_type == "sqrt":
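
The comment fixed here belongs to a matcher that looks for a pow(x, 2) op paired with a sqrt, the pattern this pass fuses. A toy, self-contained illustration of that matching idea, not the MIL pass itself:

from dataclasses import dataclass, field
from typing import List, Optional

@dataclass
class ToyOp:
    op_type: str
    y_val: Optional[float] = None          # exponent, for "pow" ops
    children: List["ToyOp"] = field(default_factory=list)

def matches_pow2_sqrt(op: ToyOp) -> bool:
    """Return True if `op` is pow(x, 2) whose only consumer is a sqrt, or the reverse."""
    if op.op_type == "pow" and op.y_val == 2 and len(op.children) == 1:
        return op.children[0].op_type == "sqrt"
    if op.op_type == "sqrt" and len(op.children) == 1:
        child = op.children[0]
        return child.op_type == "pow" and child.y_val == 2
    return False

sqrt_op = ToyOp("sqrt")
pow_op = ToyOp("pow", y_val=2, children=[sqrt_op])
print(matches_pow2_sqrt(pow_op))  # True: a pow(2) feeding a sqrt is a fusable pair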

coremltools/converters/mil/backend/nn/load.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -157,7 +157,7 @@ def _set_user_inputs(proto, inputs):
157157

158158

159159
def _set_symbolic_inputs(proto, symbolic_inputs):
160-
# Set symbolic input shapes by -1 infered from graph
160+
# Set symbolic input shapes by -1 inferred from graph
161161
for input_name, shape in symbolic_inputs.items():
162162
lb = [1 if is_symbolic(d) else d for d in shape]
163163
ub = [-1 if is_symbolic(d) else d for d in shape]
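
The fixed comment describes how symbolic dimensions become flexible shape bounds: a symbolic dim gets a lower bound of 1 and an upper bound of -1 (unbounded). A small stand-alone sketch of that bound computation, with a simplified check in place of the MIL `is_symbolic` helper:

def is_symbolic_toy(d):
    # Simplified stand-in: treat any non-integer marker (e.g. the string "s0") as symbolic.
    return not isinstance(d, int)

def symbolic_bounds(shape):
    lb = [1 if is_symbolic_toy(d) else d for d in shape]
    ub = [-1 if is_symbolic_toy(d) else d for d in shape]
    return lb, ub

print(symbolic_bounds(["s0", 3, 224, 224]))  # ([1, 3, 224, 224], [-1, 3, 224, 224])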

coremltools/converters/mil/backend/nn/op_mapping.py

Lines changed: 2 additions & 2 deletions
@@ -557,7 +557,7 @@ def conv_helper(const_context, builder, op):

 if is_conv1d or is_conv2d:
 if weights is None and has_bias:
-# weights are dyanmic.
+# weights are dynamic.
 # In this case, bias, if present, cannot be part of the conv op
 # it needs to be added separately via an add op
 out_name += "_without_bias"
@@ -3537,7 +3537,7 @@ def _realloc_list(const_context, builder, ls_var, index_var, value_var, mode):
 # (1)
 # check if we need to re-initialize the tensorarray:
 # it happens when the elem_shape is runtime determined and the runtime shape is not equal to
-# the default shape. Ex: elem_shape is = [i0, 10] (initilized with [1, 10]) and at the runtime we get [2, 10].
+# the default shape. Ex: elem_shape is = [i0, 10] (initialized with [1, 10]) and at the runtime we get [2, 10].

 # (2)
 # If index_var >= len(ls_var), reallocate the array and copy over existing
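
The second hunk's comment describes when a tensor array must be re-initialized: its element shape was built from a default (symbolic dims filled with 1), so a mismatching runtime shape forces re-initialization. A toy illustration of that check, separate from the builder code:

# elem_shape [i0, 10] is initialized with the symbolic dim i0 defaulted to 1.
default_elem_shape = [1, 10]
runtime_elem_shape = [2, 10]   # shape of the element actually written at runtime

needs_reinit = runtime_elem_shape != default_elem_shape
print(needs_reinit)  # True -> the tensor array is re-initialized before the write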

coremltools/converters/mil/backend/nn/passes/mlmodel_passes.py

Lines changed: 3 additions & 3 deletions
@@ -175,7 +175,7 @@ def _remove_layers_from_spec(nn_spec, layers_to_delete):

 def _get_disconnected_layers_rec(nn_spec):
 """
-- Iteraters over layers in bottom-up fashion
+- Iterates over layers in bottom-up fashion
 - Collect layers if it's output is not being used (marks and does lazy deletion)
 - Recursively iterates over NN Spec if layer is Loop or Branch
 """
@@ -245,7 +245,7 @@ def _decrease_input_degree(layer):
 and len(else_layers_to_delete) == total_else_layers
 ):
 # If both branches are empty after dead-layer elimination
-# remove branch layer altogehter
+# remove branch layer altogether
 layers_to_delete.append(_layer)
 _decrease_input_degree(_layer)
 continue
@@ -422,7 +422,7 @@ def solve_dp(layers):
 For example, if sol_num[10] = 5, this means after index 10, we can at most remove 5 nodes.
 sol_bt[i] keeps the first starting point of identity sequence which results in the
 optimal solution after index i.
-For example, if sol_num[10] = 12, means that in order to get rid of the maxium number of
+For example, if sol_num[10] = 12, means that in order to get rid of the maximum number of
 nodes after 10, the first starting point is index 12.
 After construct sol_num and sol_bt by dynamic programming, we backtrack for the optimal
 solution using sol_bt.
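
The ``solve_dp`` docstring in the last hunk explains two DP tables: ``sol_num[i]`` is the most nodes removable at or after index ``i``, and ``sol_bt[i]`` is the start of the identity run chosen to achieve it. Below is a self-contained toy version of that scheme, not the coremltools pass; the rule that only runs of at least two identities may be removed is invented purely so the DP has a decision to make:

def solve_dp_toy(layers, min_run=2):
    """layers: list of layer types, e.g. ["conv", "identity", "identity", ...]."""
    n = len(layers)
    sol_num = [0] * (n + 1)      # sol_num[i]: most removable nodes at or after index i
    sol_bt = [None] * (n + 1)    # sol_bt[i]: start of the first removed identity run, if any
    for i in range(n - 1, -1, -1):
        sol_num[i], sol_bt[i] = sol_num[i + 1], sol_bt[i + 1]    # option 1: keep layer i
        if layers[i] == "identity":
            j = i
            while j < n and layers[j] == "identity":
                j += 1                                            # identity run covers [i, j)
            if j - i >= min_run and (j - i) + sol_num[j] > sol_num[i]:
                sol_num[i], sol_bt[i] = (j - i) + sol_num[j], i   # option 2: remove the run
    # Backtrack the chosen runs using sol_bt.
    removed, i = [], 0
    while i <= n and sol_bt[i] is not None:
        start = sol_bt[i]
        j = start
        while j < n and layers[j] == "identity":
            j += 1
        removed.extend(range(start, j))
        i = j
    return sol_num[0], removed

print(solve_dp_toy(["conv", "identity", "identity", "conv", "identity"]))
# (2, [1, 2]) with the default min_run of 2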

coremltools/converters/mil/backend/nn/passes/test_mlmodel_passes.py

Lines changed: 3 additions & 3 deletions
@@ -350,7 +350,7 @@ def _test_builder(self, builder, input_shape, expected_layer_num=None):

 def test_output_edge_case(self):

-# For now for safety purpose, the node which are output should't be merged
+# For now for safety purpose, the node which are output shouldn't be merged
 input_shape = (1, 10, 5)
 input_features = [("data", datatypes.Array(*input_shape))]
 output_features = [("out", None)]
@@ -374,7 +374,7 @@ def test_output_edge_case(self):

 def test_output_edge_case_2(self):

-# For now for safety purpose, the node which are output should't be merged
+# For now for safety purpose, the node which are output shouldn't be merged
 input_shape = (1, 10, 5)
 input_features = [("data", datatypes.Array(*input_shape))]
 output_features = [("out", None)]
@@ -675,7 +675,7 @@ def test_branch_structure(self):
 RELU_2
 t_0, t_1, t_3 can be merged.
 t_4, t_5 can be merged.
-The output shuld be
+The output should be
 INPUT
 |
 .------.

coremltools/converters/mil/experimental/passes/generic_conv_bias_fusion.py

Lines changed: 1 addition & 1 deletion
@@ -46,7 +46,7 @@
 ...

 When taking all of the conv/conv_tranpose, transpose/no transpose, and add/sub into account,
-We end up with a total of 8 patterns (2^3). These patterns are paramaterized by the pattern_to_detect
+We end up with a total of 8 patterns (2^3). These patterns are parameterized by the pattern_to_detect
 function below.
 """
