26 | 26 | */ |
27 | 27 |
28 | 28 | #include "Tasks.h" |
29 | | -#include <pthread.h> |
30 | | -#include <sys/sysctl.h> |
31 | | -#include <libkern/OSAtomic.h> |
32 | 29 |
33 | | -typedef struct HapCodecTaskRecord |
34 | | -{ |
35 | | - unsigned int group; |
36 | | - HapCodecTaskWorkFunction func; |
37 | | - void *context; |
38 | | - unsigned int running; |
39 | | -} HapCodecTaskRecord; |
40 | | - |
41 | | -// TODO: contain these in a struct which we malloc/free reducing our loaded code footprint and |
42 | | -// making init/cleanup faster (we can set the pointer fast then teardown outside the lock) |
43 | | -static OSSpinLock mGlobalLock = OS_SPINLOCK_INIT; |
44 | | -static unsigned int mSenderCount = 0U; |
45 | | -static int mInitted = 0; |
46 | | -static unsigned int mThreadCount = 0U; |
47 | | -static pthread_mutex_t mThreadLock; |
48 | | -static pthread_cond_t mTaskWaitCond; |
49 | | -static pthread_cond_t mFeedWaitCond; |
50 | | -static HapCodecTaskRecord *mTasks; |
51 | | - |
52 | | -static int HapCodecTasksGetMaximumThreadCount(); |
53 | | - |
54 | | -static void *HapCodecTasksThread(void *info) |
55 | | -{ |
56 | | -#pragma unused (info) |
57 | | - int done = 0; |
58 | | - pthread_mutex_lock(&mThreadLock); |
59 | | - do |
60 | | - { |
61 | | - int i = 0; |
62 | | - int ran = 0; |
63 | | - for (i = 0; i < HapCodecTasksGetMaximumThreadCount(); i++) |
64 | | - { |
65 | | - if (mTasks[i].func != NULL && mTasks[i].running == 0) |
66 | | - { |
67 | | - mTasks[i].running = 1U; |
68 | | - pthread_mutex_unlock(&mThreadLock); |
69 | | - mTasks[i].func(mTasks[i].context); |
70 | | - pthread_mutex_lock(&mThreadLock); |
71 | | - mTasks[i].func = NULL; |
72 | | - mTasks[i].running = 0; |
73 | | - ran = 1; |
74 | | - break; |
75 | | - } |
76 | | - } |
77 | | - if (ran == 0 && mSenderCount == 0) |
78 | | - { |
79 | | - done = 1; |
80 | | - } |
81 | | - if (ran == 1) |
82 | | - { |
83 | | - pthread_cond_signal(&mFeedWaitCond); // TODO: check we actually need to signal perhaps by wrapping our semaphores into a counting semaphore pseudo-class |
84 | | - } |
85 | | - if (done == 0 && ran == 0) |
86 | | - { |
87 | | - pthread_cond_wait(&mTaskWaitCond, &mThreadLock); |
88 | | - } |
89 | | - } |
90 | | - while (done == 0); |
91 | | - mThreadCount--; |
92 | | - if (mThreadCount == 0) |
93 | | - { |
94 | | - pthread_cond_signal(&mFeedWaitCond); |
95 | | - } |
96 | | - pthread_mutex_unlock(&mThreadLock); |
97 | | - return NULL; |
98 | | -} |
99 | | - |
100 | | -static int HapCodecTasksGetMaximumThreadCount() |
101 | | -{ |
102 | | - static int mMaxThreadCount = 0; |
103 | | - if (mMaxThreadCount == 0) |
104 | | - { |
105 | | - int mib[2] = {CTL_HW, HW_NCPU}; |
106 | | - size_t len = sizeof(mMaxThreadCount); |
107 | | - |
108 | | - // could use something like and watch for changes to eg power state |
109 | | - // sysctlbyname("hw.activecpu", &ncpu, &len, NULL, 0); |
110 | | - |
111 | | - int result = sysctl(mib, 2, &mMaxThreadCount, &len, NULL, 0); |
112 | | - if (result != 0) |
113 | | - { |
114 | | - mMaxThreadCount = 4; // conservative guess if we couldn't get a value |
115 | | - } |
116 | | - } |
117 | | - return mMaxThreadCount; |
118 | | -} |
119 | | - |
120 | | -static int HapCodecTasksInit() |
121 | | -{ |
122 | | - if (mInitted == 0) |
123 | | - { |
124 | | - mThreadCount = 0U; |
125 | | - // This isn't ideal doing these longer operations inside a spinlock... once at load using an initializer? |
126 | | - if (pthread_mutex_init(&mThreadLock, NULL) != 0) |
127 | | - { |
128 | | - return 1; |
129 | | - } |
130 | | - if (pthread_cond_init(&mTaskWaitCond, NULL) != 0) |
131 | | - { |
132 | | - pthread_mutex_destroy(&mThreadLock); |
133 | | - return 1; |
134 | | - } |
135 | | - if (pthread_cond_init(&mFeedWaitCond, NULL) != 0) |
136 | | - { |
137 | | - pthread_mutex_destroy(&mThreadLock); |
138 | | - pthread_cond_destroy(&mTaskWaitCond); |
139 | | - return 1; |
140 | | - } |
141 | | - mTasks = malloc(sizeof(HapCodecTaskRecord) * HapCodecTasksGetMaximumThreadCount()); |
142 | | - int i; |
143 | | - for (i = 0; i < HapCodecTasksGetMaximumThreadCount(); i++) |
144 | | - { |
145 | | - mTasks[i].running = 0; |
146 | | - mTasks[i].func = NULL; |
147 | | - } |
148 | | - mInitted = 1; |
149 | | - } |
150 | | - return 0; |
151 | | -} |
| 30 | +struct HapCodecTaskGroup { |
| 31 | + HapCodecTaskWorkFunction task; |
| 32 | + dispatch_group_t group; |
| 33 | + dispatch_queue_t queue; |
| 34 | + dispatch_semaphore_t semaphore; |
| 35 | +}; |
152 | 36 |
153 | | -static void HapCodecTasksCleanup(void) |
| 37 | +void HapCodecTasksAddTask(HapCodecTaskGroupRef group, void *context) |
154 | 38 | { |
155 | | - if (mInitted != 0) |
| 39 | + if (group && group->group && group->queue && group->task && group->semaphore) |
156 | 40 | { |
157 | | - // TODO: we could do this on the last thread if we are sure we can avoid creating a new instance over the top of it |
158 | | - pthread_mutex_lock(&mThreadLock); |
159 | | - pthread_cond_broadcast(&mTaskWaitCond); |
160 | | - while (mThreadCount > 0) |
161 | | - { |
162 | | - pthread_cond_wait(&mFeedWaitCond, &mThreadLock); |
163 | | - } |
164 | | - pthread_mutex_unlock(&mThreadLock); |
165 | | - pthread_mutex_destroy(&mThreadLock); |
166 | | - pthread_cond_destroy(&mTaskWaitCond); |
167 | | - pthread_cond_destroy(&mFeedWaitCond); |
168 | | - free(mTasks); |
169 | | - mTasks = NULL; |
170 | | - mInitted = 0; |
| 41 | + dispatch_semaphore_wait(group->semaphore, DISPATCH_TIME_FOREVER); |
| 42 | + // Copy values in case group is released before execution |
| 43 | + HapCodecTaskWorkFunction task = group->task; |
| 44 | + dispatch_semaphore_t semaphore = group->semaphore; |
| 45 | + // Retain semaphore for block |
| 46 | + dispatch_retain(semaphore); |
| 47 | + dispatch_group_async(group->group, group->queue, ^{ |
| 48 | + task(context); |
| 49 | + dispatch_semaphore_signal(semaphore); |
| 50 | + dispatch_release(semaphore); |
| 51 | + }); |
171 | 52 | } |
172 | 53 | } |
173 | 54 |
174 | | -void HapCodecTasksWillStart(void) |
| 55 | +void HapCodecTasksWaitForGroupToComplete(HapCodecTaskGroupRef group) |
175 | 56 | { |
176 | | - OSSpinLockLock(&mGlobalLock); |
177 | | - mSenderCount++; |
178 | | - if (mSenderCount == 1U) |
179 | | - { |
180 | | - HapCodecTasksInit(); |
181 | | - } |
182 | | - OSSpinLockUnlock(&mGlobalLock); |
183 | | -} |
184 | | - |
185 | | -void HapCodecTasksWillStop(void) |
186 | | -{ |
187 | | - OSSpinLockLock(&mGlobalLock); |
188 | | - mSenderCount--; |
189 | | - if (mSenderCount == 0U) |
190 | | - { |
191 | | - // clear state and stop our threads outside of the lock |
192 | | - HapCodecTasksCleanup(); |
193 | | - } |
194 | | - OSSpinLockUnlock(&mGlobalLock); |
| 57 | + if (group && group->group) dispatch_group_wait(group->group, DISPATCH_TIME_FOREVER); |
195 | 58 | } |
196 | 59 |
197 | | -void HapCodecTasksAddTask(HapCodecTaskWorkFunction task, unsigned int group, void *context) |
| 60 | +HapCodecTaskGroupRef HapCodecTasksCreateGroup(HapCodecTaskWorkFunction task, unsigned int maxTasks) |
198 | 61 | { |
199 | | - pthread_mutex_lock(&mThreadLock); |
200 | | - // Check to see if we can spawn a new thread for this task |
201 | | - if (mThreadCount < HapCodecTasksGetMaximumThreadCount()) |
| 62 | + HapCodecTaskGroupRef group = NULL; |
| 63 | + if (task) |
202 | 64 | { |
203 | | - pthread_t thread; |
204 | | - pthread_attr_t attr; |
205 | | - pthread_attr_init(&attr); |
206 | | - pthread_attr_setdetachstate(&attr,PTHREAD_CREATE_DETACHED); |
207 | | - if (pthread_create(&thread, &attr, HapCodecTasksThread, NULL) == 0) |
| 65 | + group = malloc(sizeof(struct HapCodecTaskGroup)); |
| 66 | + if (group) |
208 | 67 | { |
209 | | - mThreadCount++; |
210 | | - } |
211 | | - } |
212 | | - // setup the task in a free slot, waiting for one if necessary |
213 | | - int i; |
214 | | - int found = 0; |
215 | | - do |
216 | | - { |
217 | | - for (i = 0; i < HapCodecTasksGetMaximumThreadCount(); i++) |
218 | | - { |
219 | | - if (mTasks[i].func == NULL) |
| 68 | + group->task = task; |
| 69 | + group->group = dispatch_group_create(); |
| 70 | + group->queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); |
| 71 | + group->semaphore = dispatch_semaphore_create(maxTasks); |
| 72 | + if (group->group == NULL || group->queue == NULL || group->semaphore == NULL) |
220 | 73 | { |
221 | | - found = 1; |
222 | | - mTasks[i].func = task; |
223 | | - mTasks[i].group = group; |
224 | | - mTasks[i].context = context; |
225 | | - break; |
| 74 | + HapCodecTasksDestroyGroup(group); |
| 75 | + group = NULL; |
226 | 76 | } |
227 | 77 | } |
228 | | - if (found == 0) |
229 | | - { |
230 | | - pthread_cond_wait(&mFeedWaitCond, &mThreadLock); |
231 | | - } |
232 | 78 | } |
233 | | - while (found == 0); |
234 | | - // signal the task thread to wake |
235 | | - pthread_cond_signal(&mTaskWaitCond); |
236 | | - pthread_mutex_unlock(&mThreadLock); |
237 | | - |
238 | | - |
| 79 | + return group; |
239 | 80 | } |
240 | 81 |
241 | | -void HapCodecTasksWaitForGroupToComplete(unsigned int group) |
| 82 | +void HapCodecTasksDestroyGroup(HapCodecTaskGroupRef group) |
242 | 83 | { |
243 | | - pthread_mutex_lock(&mThreadLock); |
244 | | - int done = 0; |
245 | | - do |
| 84 | + if (group) |
246 | 85 | { |
247 | | - int i; |
248 | | - done = 1; |
249 | | - for (i = 0; i < HapCodecTasksGetMaximumThreadCount(); i++) |
250 | | - { |
251 | | - if (mTasks[i].func != NULL && mTasks[i].group == group) |
252 | | - { |
253 | | - done = 0; |
254 | | - } |
255 | | - } |
256 | | - if (done == 0) |
257 | | - { |
258 | | - pthread_cond_wait(&mFeedWaitCond, &mThreadLock); |
259 | | - } |
260 | | - } while (done == 0); |
261 | | - pthread_mutex_unlock(&mThreadLock); |
262 | | -} |
263 | | - |
264 | | -unsigned int HapCodecTasksNewGroup(void) |
265 | | -{ |
266 | | - static int32_t mGroup = 0; |
267 | | - return OSAtomicIncrement32(&mGroup); |
| 86 | + if (group->group) dispatch_release(group->group); |
| 87 | + if (group->queue) dispatch_release(group->queue); |
| 88 | + if (group->semaphore) dispatch_release(group->semaphore); |
| 89 | + free(group); |
| 90 | + } |
268 | 91 | } |