@@ -40,11 +40,11 @@
 ///
 /// * Fast. It uses only fixed-width integer arithmetic and has
 /// constant memory requirements. For double-precision values on
-/// 64-bit processors, it is competitive with Ryu. For double-precision
+/// 64-bit processors, it is competitive with Ryu. For double-precision
 /// values on 32-bit processors, and higher-precision values on all
 /// processors, it is considerably faster.
 ///
 /// * Always Accurate. Converting the decimal form back to binary
-/// will always yield exactly the same value. For the IEEE 754
+/// will always yield exactly the same value. For the IEEE 754
 /// formats, the round-trip will produce exactly the same bit
 /// pattern in memory.
 ///
@@ -125,7 +125,7 @@ static void intervalContainingPowerOf10_Binary32(int p, uint64_t *lower, uint64_
 #endif
 
 //
-// Helpers used by binary32, binary64, float80, and binary128
+// Helpers used by binary32, binary64, float80, and binary128.
 //
 
 #if SWIFT_DTOA_BINARY32_SUPPORT || SWIFT_DTOA_BINARY64_SUPPORT || SWIFT_DTOA_FLOAT80_SUPPORT || SWIFT_DTOA_BINARY128_SUPPORT
@@ -782,7 +782,7 @@ size_t swift_dtoa_optimal_binary64_p(const void *d, char *dest, size_t length)
     // bias. That's because they treat the significand as a
     // fixed-point number with one bit (the hidden bit) integer
     // portion. The logic here reconstructs the significand as a
-    // pure fraction, so we need to accomodate that when
+    // pure fraction, so we need to accommodate that when
     // reconstructing the binary exponent.
     static const int64_t exponentBias = (1 << (exponentBitCount - 1)) - 2; // 1022
 
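As a concrete check of the bias adjustment this hunk describes, here is a small standalone sketch (my own illustration, not code from this file) that decodes a normal binary64 value into a pure-fraction significand in [1/2, 1) and verifies that subtracting 1022, rather than the standard IEEE 754 bias of 1023, reconstructs the value exactly:

    #include <assert.h>
    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
        double d = 3.14159;                          // any normal double
        uint64_t bits;
        memcpy(&bits, &d, sizeof bits);
        int raw = (int)((bits >> 52) & 0x7ff);       // biased exponent field
        uint64_t sig = (bits & 0xfffffffffffffULL)   // 52 stored bits...
                     | (1ULL << 52);                 // ...plus the hidden bit
        // Read all 53 significand bits as a pure fraction in [1/2, 1):
        double f = (double)sig / 9007199254740992.0; // sig / 2^53
        int e = raw - 1022;                          // raw - ((1 << 10) - 2)
        assert(f >= 0.5 && f < 1.0);
        assert(ldexp(f, e) == d);                    // reconstructs exactly
        printf("%.17g = %.17g * 2^%d\n", d, f, e);
        return 0;
    }

Moving the binary point left by one (fraction in [1/2, 1) instead of fixed-point in [1, 2)) is exactly why the bias here is one less than the textbook 1023.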
@@ -911,14 +911,14 @@ size_t swift_dtoa_optimal_binary64_p(const void *d, char *dest, size_t length)
     // This ensures accuracy but, as explained in Loitsch' paper,
     // this carries a risk that there will be a shorter digit
     // sequence outside of our narrowed interval that we will
-    // miss. This risk obviously gets lower with increased
+    // miss. This risk obviously gets lower with increased
     // precision, but it wasn't until the Errol paper that anyone
     // had a good way to test whether a particular implementation
-    // had sufficient precision. That paper shows a way to enumerate
+    // had sufficient precision. That paper shows a way to enumerate
     // the worst-case numbers; those numbers that are extremely close
     // to the mid-points between adjacent floating-point values.
     // These are the values that might sit just outside of the
-    // narrowed interval. By testing these values, we can verify
+    // narrowed interval. By testing these values, we can verify
     // the correctness of our implementation.
 
     // Multiply out the upper midpoint, rounding down...
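For context on the midpoints this hunk keeps referring to: the sketch below (an illustration under my own variable names, not this file's fixed-point representation) extracts the integer significand m and exponent e of a normal double, so that the value is exactly m * 2^e, and prints the interval between the midpoints to its two neighbors. Every decimal strictly inside this interval rounds back to the same double, which is why the digit generator is free to pick the shortest one. The narrower lower gap at powers of two is ignored here for simplicity:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
        double d = 1.5;
        uint64_t bits;
        memcpy(&bits, &d, sizeof bits);
        int raw = (int)((bits >> 52) & 0x7ff);
        uint64_t m = (bits & 0xfffffffffffffULL) | (1ULL << 52);
        int e = raw - 1075;                    // value = m * 2^e exactly
        // Midpoints to the neighboring doubles, each exact as an odd
        // integer times 2^(e-1); no rounding has happened yet:
        //   lower = (2m - 1) * 2^(e-1),  upper = (2m + 1) * 2^(e-1)
        printf("(%llu * 2^%d, %llu * 2^%d)\n",
               (unsigned long long)(2 * m - 1), e - 1,
               (unsigned long long)(2 * m + 1), e - 1);
        return 0;
    }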
@@ -1202,7 +1202,8 @@ size_t swift_dtoa_optimal_binary64_p(const void *d, char *dest, size_t length)
     // value 0.1234 and computed u = 0.1257, l = 0.1211. The above
     // digit generation works with `u`, so produces 0.125. But the
     // values 0.122, 0.123, and 0.124 are just as short and 0.123 is
-    // the best choice, since it's closest to the original value.
+    // therefore the best choice, since it's closest to the original
+    // value.
 
     // We know delta and t are both less than 10.0 here, so we can
     // shed some excess integer bits to simplify the following:
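To make the backtracking step in this hunk's 0.1234 example concrete, here is a self-contained sketch using made-up integer scaling (everything multiplied by 10^4), not the file's actual fixed-point code. Starting from the digits generated against the upper bound, it steps the final digit down while doing so both stays above the lower bound and moves the result closer to the original value:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
        // Interval (0.1211, 0.1257), original 0.1234, scaled by 10^4.
        // Digit generation from the upper bound yields 0.125 -> 1250.
        int lower = 1211, value = 1234, digits = 1250;
        // Step the last displayed digit down (a step of 10 at this scale)
        // while the candidate stays inside the interval and gets closer
        // to the original value.
        while (digits - 10 > lower &&
               abs(digits - 10 - value) < abs(digits - value)) {
            digits -= 10;
        }
        printf("0.%d\n", digits);   // prints 0.1230, i.e. 0.123
        return 0;
    }

The loop stops at 1230 because 1220 is still inside the interval but farther from 1234, matching the comment's conclusion that 0.123 is the best short result.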