
#define PY_SSIZE_T_CLEAN
#include <Python.h>
+#include <dlpack/dlpack.h>

#include "numpy/arrayobject.h"
#include "common/npy_argparse.h"
@@ -100,19 +101,6 @@ array_get_dl_device(PyArrayObject *self) {
    return ret;
}

-static char *
-array_get_dl_data(PyArrayObject *self) {
-    PyObject *base = PyArray_BASE(self);
-    if (PyCapsule_IsValid(base, NPY_DLPACK_INTERNAL_CAPSULE_NAME)) {
-        DLManagedTensor *managed = PyCapsule_GetPointer(
-                base, NPY_DLPACK_INTERNAL_CAPSULE_NAME);
-        if (managed == NULL) {
-            return NULL;
-        }
-        return managed->dl_tensor.data;
-    }
-    return PyArray_DATA(self);
-}

PyObject *
array_dlpack(PyArrayObject *self,
@@ -202,24 +190,28 @@ array_dlpack(PyArrayObject *self,
    if (PyErr_Occurred()) {
        return NULL;
    }
-    char *data = array_get_dl_data(self);
-    if (data == NULL) {
-        return NULL;
-    }
-    if ((char *)PyArray_DATA(self) - data != 0) {
-        PyErr_SetString(PyExc_TypeError,
-                "Offsets not clearly supported by this "
-                "version of DLPack.");
-        return NULL;
-    }

    DLManagedTensor *managed = PyMem_Malloc(sizeof(DLManagedTensor));
    if (managed == NULL) {
        PyErr_NoMemory();
        return NULL;
    }

-    managed->dl_tensor.data = data;
+    /*
+     * Note: the `dlpack.h` header suggests/standardizes that `data` must be
+     * 256-byte aligned. We ignore this intentionally, because `__dlpack__`
+     * standardizes that `byte_offset` must be 0 (for now) to not break pytorch:
+     * https://github.com/data-apis/array-api/issues/293#issuecomment-964111413
+     *
+     * We further assume that exporting fully unaligned data is OK even without
+     * `byte_offset` since the standard does not reject it.
+     * Presumably, pytorch will support importing `byte_offset != 0` and NumPy
+     * can choose to use it starting about 2023. At that point, it may be
+     * that NumPy MUST use `byte_offset` to adhere to the standard (as
+     * specified in the header)!
+     */
+    managed->dl_tensor.data = PyArray_DATA(self);
+    managed->dl_tensor.byte_offset = 0;
    managed->dl_tensor.device = device;
    managed->dl_tensor.dtype = managed_dtype;

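For reference, `dlpack.h` defines the start of the tensor contents as `data + byte_offset`, so the aligned-`data` suggestion and the approach taken in the patch above are two ways of describing the same memory. The sketch below is not part of the change; the helper names (`fill_with_offset`, `fill_offset_free`, `dltensor_first_element`) are hypothetical, and only the `DLTensor` fields `data` and `byte_offset` from `dlpack.h` are assumed.

#include <stdint.h>
#include <dlpack/dlpack.h>   /* assumed available, as added by the patch above */

/*
 * Two equivalent ways to describe a buffer that starts at `base + offset`.
 * The importer-visible start of the data is always `data + byte_offset`.
 * Helper names are made up for illustration only.
 */
void
fill_with_offset(DLTensor *t, char *base, uint64_t offset)
{
    /* Follows the dlpack.h suggestion: `data` stays aligned (e.g. the
     * allocation start) and the offset is reported separately. */
    t->data = base;
    t->byte_offset = offset;
}

void
fill_offset_free(DLTensor *t, char *base, uint64_t offset)
{
    /* What the patch above does: fold the offset into `data` (which may
     * then be unaligned) so importers that ignore `byte_offset` still work. */
    t->data = base + offset;
    t->byte_offset = 0;
}

/* Either way, a consumer locates the first element like this: */
void *
dltensor_first_element(const DLTensor *t)
{
    return (char *)t->data + t->byte_offset;
}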