|
18 | 18 |
|
19 | 19 | import numpy as np |
20 | 20 |
|
21 | | -from .casting import (shared_range, type_info, OK_FLOATS) |
| 21 | +from .casting import shared_range, OK_FLOATS |
22 | 22 | from .openers import Opener, BZ2File, IndexedGzipFile |
23 | 23 | from .deprecated import deprecate_with_version |
24 | 24 | from .externals.oset import OrderedSet |
@@ -372,74 +372,6 @@ def make_dt_codes(codes_seqs): |
372 | 372 | return Recoder(dt_codes, fields + ['dtype', 'sw_dtype'], DtypeMapper) |
373 | 373 |
|
374 | 374 |
|
375 | | -@deprecate_with_version('can_cast deprecated. ' |
376 | | - 'Please use arraywriter classes instead', |
377 | | - '1.2', |
378 | | - '3.0') |
379 | | -def can_cast(in_type, out_type, has_intercept=False, has_slope=False): |
380 | | - """ Return True if we can safely cast ``in_type`` to ``out_type`` |
381 | | -
|
382 | | - Parameters |
383 | | - ---------- |
384 | | - in_type : numpy type |
385 | | - type of data we will cast from |
386 | | - out_type : numpy type |
387 | | - type that we want to cast to |
388 | | - has_intercept : bool, optional |
389 | | - Whether we can subtract a constant from the data (before scaling) |
390 | | - before casting to ``out_type``. Default is False |
391 | | - has_slope : bool, optional |
392 | | - Whether we can use a scaling factor to adjust slope of |
393 | | - relationship of data to data in cast array. Default is False |
394 | | -
|
395 | | - Returns |
396 | | - ------- |
397 | | - tf : bool |
398 | | - True if we can safely cast, False otherwise |
399 | | -
|
400 | | - Examples |
401 | | - -------- |
402 | | - >>> can_cast(np.float64, np.float32) # doctest: +SKIP |
403 | | - True |
404 | | - >>> can_cast(np.complex128, np.float32) # doctest: +SKIP |
405 | | - False |
406 | | - >>> can_cast(np.int64, np.float32) # doctest: +SKIP |
407 | | - True |
408 | | - >>> can_cast(np.float32, np.int16) # doctest: +SKIP |
409 | | - False |
410 | | - >>> can_cast(np.float32, np.int16, False, True) # doctest: +SKIP |
411 | | - True |
412 | | - >>> can_cast(np.int16, np.uint8) # doctest: +SKIP |
413 | | - False |
414 | | -
|
415 | | - Whether we can actually cast int to uint when we don't have an intercept |
416 | | - depends on the data. That's why this function isn't very useful. But we |
417 | | - assume that an integer is using its full range, and check whether scaling |
418 | | - works in that situation. |
419 | | -
|
420 | | - Here we need an intercept to scale the full range of an int to a uint |
421 | | -
|
422 | | - >>> can_cast(np.int16, np.uint8, False, True) # doctest: +SKIP |
423 | | - False |
424 | | - >>> can_cast(np.int16, np.uint8, True, True) # doctest: +SKIP |
425 | | - True |
426 | | - """ |
427 | | - in_dtype = np.dtype(in_type) |
428 | | - # Whether we can cast depends on the data, and we've only got the type. |
429 | | - # Let's assume integers use all of their range but floats etc not |
430 | | - if in_dtype.kind in 'iu': |
431 | | - info = np.iinfo(in_dtype) |
432 | | - data = np.array([info.min, info.max], dtype=in_dtype) |
433 | | - else: # Float or complex or something. Any old thing will do |
434 | | - data = np.ones((1,), in_type) |
435 | | - from .arraywriters import make_array_writer, WriterError |
436 | | - try: |
437 | | - make_array_writer(data, out_type, has_slope, has_intercept) |
438 | | - except WriterError: |
439 | | - return False |
440 | | - return True |
441 | | - |
442 | | - |
443 | 375 | def _is_compressed_fobj(fobj): |
444 | 376 | """ Return True if fobj represents a compressed data file-like object |
445 | 377 | """ |
@@ -1005,154 +937,6 @@ def working_type(in_type, slope=1.0, inter=0.0): |
1005 | 937 | return val.dtype.type |
1006 | 938 |
|
1007 | 939 |
|
1008 | | -@deprecate_with_version('calculate_scale deprecated. ' |
1009 | | - 'Please use arraywriter classes instead', |
1010 | | - '1.2', |
1011 | | - '3.0') |
1012 | | -def calculate_scale(data, out_dtype, allow_intercept): |
1013 | | - """ Calculate scaling and optional intercept for data |
1014 | | -
|
1015 | | - Parameters |
1016 | | - ---------- |
1017 | | - data : array |
1018 | | - out_dtype : dtype |
1019 | | - output data type in some form understood by ``np.dtype`` |
1020 | | - allow_intercept : bool |
1021 | | - If True allow non-zero intercept |
1022 | | -
|
1023 | | - Returns |
1024 | | - ------- |
1025 | | - scaling : None or float |
1026 | | - scalefactor to divide into data. None if no valid data |
1027 | | - intercept : None or float |
1028 | | - intercept to subtract from data. None if no valid data |
1029 | | - mn : None or float |
1030 | | - minimum of finite value in data or None if this will not |
1031 | | - be used to threshold data |
1032 | | - mx : None or float |
1033 | | - maximum of finite value in data, or None if this will not |
1034 | | - be used to threshold data |
1035 | | - """ |
1036 | | - # Code here is a compatibility shell around arraywriters refactor |
1037 | | - in_dtype = data.dtype |
1038 | | - out_dtype = np.dtype(out_dtype) |
1039 | | - if np.can_cast(in_dtype, out_dtype): |
1040 | | - return 1.0, 0.0, None, None |
1041 | | - from .arraywriters import make_array_writer, WriterError, get_slope_inter |
1042 | | - try: |
1043 | | - writer = make_array_writer(data, out_dtype, True, allow_intercept) |
1044 | | - except WriterError as e: |
1045 | | - raise ValueError(str(e)) |
1046 | | - if out_dtype.kind in 'fc': |
1047 | | - return (1.0, 0.0, None, None) |
1048 | | - mn, mx = writer.finite_range() |
1049 | | - if (mn, mx) == (np.inf, -np.inf): # No valid data |
1050 | | - return (None, None, None, None) |
1051 | | - if in_dtype.kind not in 'fc': |
1052 | | - mn, mx = (None, None) |
1053 | | - return get_slope_inter(writer) + (mn, mx) |
1054 | | - |
1055 | | - |
1056 | | -@deprecate_with_version('scale_min_max deprecated. Please use arraywriter ' |
1057 | | - 'classes instead.', |
1058 | | - '1.2', |
1059 | | - '3.0') |
1060 | | -def scale_min_max(mn, mx, out_type, allow_intercept): |
1061 | | - """ Return scaling and intercept min, max of data, given output type |
1062 | | -
|
1063 | | - Returns ``scalefactor`` and ``intercept`` to best fit data with |
1064 | | - given ``mn`` and ``mx`` min and max values into range of data type |
1065 | | - with ``type_min`` and ``type_max`` min and max values for type. |
1066 | | -
|
1067 | | - The calculated scaling is therefore:: |
1068 | | -
|
1069 | | - scaled_data = (data-intercept) / scalefactor |
1070 | | -
|
1071 | | - Parameters |
1072 | | - ---------- |
1073 | | - mn : scalar |
1074 | | - data minimum value |
1075 | | - mx : scalar |
1076 | | - data maximum value |
1077 | | - out_type : numpy type |
1078 | | - numpy type of output |
1079 | | - allow_intercept : bool |
1080 | | - If true, allow calculation of non-zero intercept. Otherwise, |
1081 | | - returned intercept is always 0.0 |
1082 | | -
|
1083 | | - Returns |
1084 | | - ------- |
1085 | | - scalefactor : numpy scalar, dtype=np.maximum_sctype(np.float64) |
1086 | | - scalefactor by which to divide data after subtracting intercept |
1087 | | - intercept : numpy scalar, dtype=np.maximum_sctype(np.float64) |
1088 | | - value to subtract from data before dividing by scalefactor |
1089 | | -
|
1090 | | - Examples |
1091 | | - -------- |
1092 | | - >>> scale_min_max(0, 255, np.uint8, False) # doctest: +SKIP |
1093 | | - (1.0, 0.0) |
1094 | | - >>> scale_min_max(-128, 127, np.int8, False) # doctest: +SKIP |
1095 | | - (1.0, 0.0) |
1096 | | - >>> scale_min_max(0, 127, np.int8, False) # doctest: +SKIP |
1097 | | - (1.0, 0.0) |
1098 | | - >>> scaling, intercept = scale_min_max(0, 127, np.int8, True) # doctest: +SKIP |
1099 | | - >>> np.allclose((0 - intercept) / scaling, -128) # doctest: +SKIP |
1100 | | - True |
1101 | | - >>> np.allclose((127 - intercept) / scaling, 127) # doctest: +SKIP |
1102 | | - True |
1103 | | - >>> scaling, intercept = scale_min_max(-10, -1, np.int8, True) # doctest: +SKIP |
1104 | | - >>> np.allclose((-10 - intercept) / scaling, -128) # doctest: +SKIP |
1105 | | - True |
1106 | | - >>> np.allclose((-1 - intercept) / scaling, 127) # doctest: +SKIP |
1107 | | - True |
1108 | | - >>> scaling, intercept = scale_min_max(1, 10, np.int8, True) # doctest: +SKIP |
1109 | | - >>> np.allclose((1 - intercept) / scaling, -128) # doctest: +SKIP |
1110 | | - True |
1111 | | - >>> np.allclose((10 - intercept) / scaling, 127) # doctest: +SKIP |
1112 | | - True |
1113 | | -
|
1114 | | - Notes |
1115 | | - ----- |
1116 | | - We don't use this function anywhere in nibabel now, it's here for API |
1117 | | - compatibility only. |
1118 | | -
|
1119 | | - The large integers lead to python long types as max / min for type. |
1120 | | - To contain the rounding error, we need to use the maximum numpy |
1121 | | - float types when casting to float. |
1122 | | - """ |
1123 | | - if mn > mx: |
1124 | | - raise ValueError('min value > max value') |
1125 | | - info = type_info(out_type) |
1126 | | - mn, mx, type_min, type_max = np.array( |
1127 | | - [mn, mx, info['min'], info['max']], np.maximum_sctype(np.float)) |
1128 | | - # with intercept |
1129 | | - if allow_intercept: |
1130 | | - data_range = mx - mn |
1131 | | - if data_range == 0: |
1132 | | - return 1.0, mn |
1133 | | - type_range = type_max - type_min |
1134 | | - scaling = data_range / type_range |
1135 | | - intercept = mn - type_min * scaling |
1136 | | - return scaling, intercept |
1137 | | - # without intercept |
1138 | | - if mx == 0 and mn == 0: |
1139 | | - return 1.0, 0.0 |
1140 | | - if type_min == 0: # uint |
1141 | | - if mn < 0 and mx > 0: |
1142 | | - raise ValueError('Cannot scale negative and positive ' |
1143 | | - 'numbers to uint without intercept') |
1144 | | - if mx < 0: |
1145 | | - scaling = mn / type_max |
1146 | | - else: |
1147 | | - scaling = mx / type_max |
1148 | | - else: # int |
1149 | | - if abs(mx) >= abs(mn): |
1150 | | - scaling = mx / type_max |
1151 | | - else: |
1152 | | - scaling = mn / type_min |
1153 | | - return scaling, 0.0 |
1154 | | - |
1155 | | - |
1156 | 940 | def int_scinter_ftype(ifmt, slope=1.0, inter=0.0, default=np.float32): |
1157 | 941 | """ float type containing int type `ifmt` * `slope` + `inter` |
1158 | 942 |
|
|
0 commit comments