 import ctypes
 import multiprocessing
 import os
-from contextlib import contextmanager
 from functools import partial
 from logging import getLogger
 # COMPAT python 3.7 : Using heap instead of Array,
@@ -88,28 +87,10 @@ def _create_shared_array(shape, dtype: Union[str, np.dtype] = np.float32):
     return data.reshape(shape)
 
 
-@contextmanager
-def temp_shared_array(shape, dtype: NP_DTYPE = np.float32) -> np.ndarray:
-    array = _create_shared_array(shape, dtype)
-    try:
-        yield array
-    finally:
-        pass
-
-
 def get_cores():
     return multiprocessing.cpu_count()
 
 
-def generate_indices(num_images):
-    """
-    Generate indices for each image.
-
-    :param num_images: The number of images.
-    """
-    return range(num_images)
-
-
 def calculate_chunksize(cores):
     # TODO possible proper calculation of chunksize, although best performance
     # has been with 1
@@ -143,7 +124,7 @@ def multiprocessing_necessary(shape: Union[int, Tuple[int, int, int]], cores) ->
 def execute_impl(img_num: int, partial_func: partial, cores: int, chunksize: int, progress: Progress, msg: str):
     task_name = f"{msg} {cores}c {chunksize}chs"
     progress = Progress.ensure_instance(progress, num_steps=img_num, task_name=task_name)
-    indices_list = generate_indices(img_num)
+    indices_list = range(img_num)
     if multiprocessing_necessary(img_num, cores):
         with Pool(cores) as pool:
             for _ in pool.imap(partial_func, indices_list, chunksize=chunksize):
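Note on the change in execute_impl: the removed generate_indices helper only wrapped range, so the call site now passes range(img_num) to pool.imap directly. A minimal standalone sketch of that pattern, with a hypothetical process_image worker standing in for the real partial function and shared-array setup:

import multiprocessing
from functools import partial
from multiprocessing import Pool


def process_image(index, scale):
    # Hypothetical stand-in for the per-image operation that the real
    # partial_func would perform against the shared array.
    return index * scale


if __name__ == "__main__":
    img_num = 8
    cores = multiprocessing.cpu_count()
    partial_func = partial(process_image, scale=2)
    # range(img_num) takes the place of the removed generate_indices(img_num);
    # imap consumes the indices lazily, handing chunksize indices to each task.
    with Pool(cores) as pool:
        for _ in pool.imap(partial_func, range(img_num), chunksize=1):
            pass

Behaviour is unchanged: generate_indices(num_images) already returned range(num_images), so inlining it simply removes an extra call.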
|