15 | 15 | #include <umf/memory_provider.h> |
16 | 16 | #include <umf/pools/pool_proxy.h> |
17 | 17 |
| 18 | +#include <algorithm> |
18 | 19 | #include <cstring> |
19 | 20 | #include <numeric> |
| 21 | +#include <random> |
20 | 22 | #include <tuple> |
21 | 23 |
22 | 24 | class MemoryAccessor { |
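The `<algorithm>` and `<random>` includes added above back the `std::shuffle`/`std::mt19937` calls in the new helpers below, which let each thread walk the shared pointer list in its own order. A minimal standalone sketch of that shuffling step, with plain `int` values standing in for the test's allocation pointers:

```cpp
#include <algorithm>
#include <iostream>
#include <numeric>
#include <random>
#include <vector>

int main() {
    // Stand-in for the per-thread copy of the test's pointer vector.
    std::vector<int> values(10);
    std::iota(values.begin(), values.end(), 0);

    // Seed a per-thread engine so each thread visits the elements in a
    // different order, varying the contention pattern across runs.
    std::random_device rd;
    std::mt19937 g(rd());
    std::shuffle(values.begin(), values.end(), g);

    for (int v : values) {
        std::cout << v << ' ';
    }
    std::cout << '\n';
    return 0;
}
```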
@@ -158,6 +160,110 @@ struct umfIpcTest : umf_test::test, |
158 | 160 | umf_memory_provider_ops_t *providerOps = nullptr; |
159 | 161 | pfnProviderParamsCreate providerParamsCreate = nullptr; |
160 | 162 | pfnProviderParamsDestroy providerParamsDestroy = nullptr; |
| 163 | + |
| 164 | + void concurrentGetConcurrentPutHandles(bool shuffle) { |
| 165 | + std::vector<void *> ptrs; |
| 166 | + constexpr size_t ALLOC_SIZE = 100; |
| 167 | + constexpr size_t NUM_POINTERS = 100; |
| 168 | + umf::pool_unique_handle_t pool = makePool(); |
| 169 | + ASSERT_NE(pool.get(), nullptr); |
| 170 | + |
| 171 | + for (size_t i = 0; i < NUM_POINTERS; ++i) { |
| 172 | + void *ptr = umfPoolMalloc(pool.get(), ALLOC_SIZE); |
| 173 | + EXPECT_NE(ptr, nullptr); |
| 174 | + ptrs.push_back(ptr); |
| 175 | + } |
| 176 | + |
| 177 | + std::array<std::vector<umf_ipc_handle_t>, NTHREADS> ipcHandles; |
| 178 | + |
| 179 | + umf_test::syncthreads_barrier syncthreads(NTHREADS); |
| 180 | + |
| 181 | + auto getHandlesFn = [shuffle, &ipcHandles, &ptrs, |
| 182 | + &syncthreads](size_t tid) { |
| 183 | + // Each thread gets a copy of the pointers to shuffle them |
| 184 | + std::vector<void *> localPtrs = ptrs; |
| 185 | + if (shuffle) { |
| 186 | + std::random_device rd; |
| 187 | + std::mt19937 g(rd()); |
| 188 | + std::shuffle(localPtrs.begin(), localPtrs.end(), g); |
| 189 | + } |
| 190 | + syncthreads(); |
| 191 | + for (void *ptr : localPtrs) { |
| 192 | + umf_ipc_handle_t ipcHandle; |
| 193 | + size_t handleSize; |
| 194 | + umf_result_t ret = |
| 195 | + umfGetIPCHandle(ptr, &ipcHandle, &handleSize); |
| 196 | + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); |
| 197 | + ipcHandles[tid].push_back(ipcHandle); |
| 198 | + } |
| 199 | + }; |
| 200 | + |
| 201 | + umf_test::parallel_exec(NTHREADS, getHandlesFn); |
| 202 | + |
| 203 | + auto putHandlesFn = [&ipcHandles, &syncthreads](size_t tid) { |
| 204 | + syncthreads(); |
| 205 | + for (umf_ipc_handle_t ipcHandle : ipcHandles[tid]) { |
| 206 | + umf_result_t ret = umfPutIPCHandle(ipcHandle); |
| 207 | + EXPECT_EQ(ret, UMF_RESULT_SUCCESS); |
| 208 | + } |
| 209 | + }; |
| 210 | + |
| 211 | + umf_test::parallel_exec(NTHREADS, putHandlesFn); |
| 212 | + |
| 213 | + for (void *ptr : ptrs) { |
| 214 | + umf_result_t ret = umfPoolFree(pool.get(), ptr); |
| 215 | + EXPECT_EQ(ret, UMF_RESULT_SUCCESS); |
| 216 | + } |
| 217 | + |
| 218 | + pool.reset(nullptr); |
| 219 | + EXPECT_EQ(stat.putCount, stat.getCount); |
| 220 | + } |
| 221 | + |
| 222 | + void concurrentGetPutHandles(bool shuffle) { |
| 223 | + std::vector<void *> ptrs; |
| 224 | + constexpr size_t ALLOC_SIZE = 100; |
| 225 | + constexpr size_t NUM_POINTERS = 100; |
| 226 | + umf::pool_unique_handle_t pool = makePool(); |
| 227 | + ASSERT_NE(pool.get(), nullptr); |
| 228 | + |
| 229 | + for (size_t i = 0; i < NUM_POINTERS; ++i) { |
| 230 | + void *ptr = umfPoolMalloc(pool.get(), ALLOC_SIZE); |
| 231 | + EXPECT_NE(ptr, nullptr); |
| 232 | + ptrs.push_back(ptr); |
| 233 | + } |
| 234 | + |
| 235 | + umf_test::syncthreads_barrier syncthreads(NTHREADS); |
| 236 | + |
| 237 | + auto getPutHandlesFn = [shuffle, &ptrs, &syncthreads](size_t) { |
| 238 | + // Each thread gets a copy of the pointers to shuffle them |
| 239 | + std::vector<void *> localPtrs = ptrs; |
| 240 | + if (shuffle) { |
| 241 | + std::random_device rd; |
| 242 | + std::mt19937 g(rd()); |
| 243 | + std::shuffle(localPtrs.begin(), localPtrs.end(), g); |
| 244 | + } |
| 245 | + syncthreads(); |
| 246 | + for (void *ptr : localPtrs) { |
| 247 | + umf_ipc_handle_t ipcHandle; |
| 248 | + size_t handleSize; |
| 249 | + umf_result_t ret = |
| 250 | + umfGetIPCHandle(ptr, &ipcHandle, &handleSize); |
| 251 | + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); |
| 252 | + ret = umfPutIPCHandle(ipcHandle); |
| 253 | + EXPECT_EQ(ret, UMF_RESULT_SUCCESS); |
| 254 | + } |
| 255 | + }; |
| 256 | + |
| 257 | + umf_test::parallel_exec(NTHREADS, getPutHandlesFn); |
| 258 | + |
| 259 | + for (void *ptr : ptrs) { |
| 260 | + umf_result_t ret = umfPoolFree(pool.get(), ptr); |
| 261 | + EXPECT_EQ(ret, UMF_RESULT_SUCCESS); |
| 262 | + } |
| 263 | + |
| 264 | + pool.reset(nullptr); |
| 265 | + EXPECT_EQ(stat.putCount, stat.getCount); |
| 266 | + } |
161 | 267 | }; |
162 | 268 |
163 | 269 | TEST_P(umfIpcTest, GetIPCHandleSize) { |
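The new helpers lean on two utilities from the UMF test harness, `umf_test::syncthreads_barrier` and `umf_test::parallel_exec`, so that the `umfGetIPCHandle`/`umfPutIPCHandle` calls actually overlap. Below is a rough standard-library-only sketch of that start-together pattern, using C++20 `std::barrier` as a stand-in rather than the harness implementation:

```cpp
#include <barrier>
#include <cstddef>
#include <functional>
#include <thread>
#include <vector>

// Rough equivalent of umf_test::parallel_exec: run fn(tid) on n threads
// and join them all before returning.
static void parallel_exec_sketch(std::size_t n,
                                 const std::function<void(std::size_t)> &fn) {
    std::vector<std::thread> threads;
    for (std::size_t tid = 0; tid < n; ++tid) {
        threads.emplace_back(fn, tid);
    }
    for (auto &t : threads) {
        t.join();
    }
}

int main() {
    constexpr std::size_t NTHREADS = 4;

    // Rough equivalent of umf_test::syncthreads_barrier: each thread does
    // its private setup first, then all threads are released together so
    // the IPC calls race against each other.
    std::barrier sync(static_cast<std::ptrdiff_t>(NTHREADS));

    parallel_exec_sketch(NTHREADS, [&](std::size_t tid) {
        // ... per-thread setup, e.g. copying and shuffling the pointers ...
        sync.arrive_and_wait();
        // ... concurrent section for thread `tid` ...
        (void)tid;
    });
    return 0;
}
```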
@@ -473,53 +579,18 @@ TEST_P(umfIpcTest, openInTwoIpcHandlers) { |
473 | 579 | EXPECT_EQ(stat.closeCount, stat.openCount); |
474 | 580 | } |
475 | 581 |
476 | | -TEST_P(umfIpcTest, ConcurrentGetPutHandles) { |
477 | | - std::vector<void *> ptrs; |
478 | | - constexpr size_t ALLOC_SIZE = 100; |
479 | | - constexpr size_t NUM_POINTERS = 100; |
480 | | - umf::pool_unique_handle_t pool = makePool(); |
481 | | - ASSERT_NE(pool.get(), nullptr); |
482 | | - |
483 | | - for (size_t i = 0; i < NUM_POINTERS; ++i) { |
484 | | - void *ptr = umfPoolMalloc(pool.get(), ALLOC_SIZE); |
485 | | - EXPECT_NE(ptr, nullptr); |
486 | | - ptrs.push_back(ptr); |
487 | | - } |
488 | | - |
489 | | - std::array<std::vector<umf_ipc_handle_t>, NTHREADS> ipcHandles; |
490 | | - |
491 | | - umf_test::syncthreads_barrier syncthreads(NTHREADS); |
492 | | - |
493 | | - auto getHandlesFn = [&ipcHandles, &ptrs, &syncthreads](size_t tid) { |
494 | | - syncthreads(); |
495 | | - for (void *ptr : ptrs) { |
496 | | - umf_ipc_handle_t ipcHandle; |
497 | | - size_t handleSize; |
498 | | - umf_result_t ret = umfGetIPCHandle(ptr, &ipcHandle, &handleSize); |
499 | | - ASSERT_EQ(ret, UMF_RESULT_SUCCESS); |
500 | | - ipcHandles[tid].push_back(ipcHandle); |
501 | | - } |
502 | | - }; |
503 | | - |
504 | | - umf_test::parallel_exec(NTHREADS, getHandlesFn); |
505 | | - |
506 | | - auto putHandlesFn = [&ipcHandles, &syncthreads](size_t tid) { |
507 | | - syncthreads(); |
508 | | - for (umf_ipc_handle_t ipcHandle : ipcHandles[tid]) { |
509 | | - umf_result_t ret = umfPutIPCHandle(ipcHandle); |
510 | | - EXPECT_EQ(ret, UMF_RESULT_SUCCESS); |
511 | | - } |
512 | | - }; |
| 582 | +TEST_P(umfIpcTest, ConcurrentGetConcurrentPutHandles) { |
| 583 | + concurrentGetConcurrentPutHandles(false); |
| 584 | +} |
513 | 585 |
514 | | - umf_test::parallel_exec(NTHREADS, putHandlesFn); |
| 586 | +TEST_P(umfIpcTest, ConcurrentGetConcurrentPutHandlesShuffled) { |
| 587 | + concurrentGetConcurrentPutHandles(true); |
| 588 | +} |
515 | 589 |
516 | | - for (void *ptr : ptrs) { |
517 | | - umf_result_t ret = umfPoolFree(pool.get(), ptr); |
518 | | - EXPECT_EQ(ret, UMF_RESULT_SUCCESS); |
519 | | - } |
| 590 | +TEST_P(umfIpcTest, ConcurrentGetPutHandles) { concurrentGetPutHandles(false); } |
520 | 591 |
521 | | - pool.reset(nullptr); |
522 | | - EXPECT_EQ(stat.putCount, stat.getCount); |
| 592 | +TEST_P(umfIpcTest, ConcurrentGetPutHandlesShuffled) { |
| 593 | + concurrentGetPutHandles(true); |
523 | 594 | } |
524 | 595 |
525 | 596 | TEST_P(umfIpcTest, ConcurrentOpenCloseHandles) { |
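For context, the cycle the two helpers stress from many threads is shown below in single-threaded form. The function signatures match the calls in the diff; the header names, the error code, and the `get_put_once` helper itself are illustrative assumptions rather than code from this change:

```cpp
#include <stddef.h>
#include <umf/ipc.h>
#include <umf/memory_pool.h>

// Single-threaded version of the get/put cycle the tests above run
// concurrently; `pool` is assumed to be a valid pool handle created
// elsewhere (the tests build theirs via makePool()).
umf_result_t get_put_once(umf_memory_pool_handle_t pool) {
    void *ptr = umfPoolMalloc(pool, 100);
    if (ptr == nullptr) {
        return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
    }

    umf_ipc_handle_t ipcHandle;
    size_t handleSize;
    umf_result_t ret = umfGetIPCHandle(ptr, &ipcHandle, &handleSize);
    if (ret == UMF_RESULT_SUCCESS) {
        // Balance the successful get with exactly one put before freeing.
        ret = umfPutIPCHandle(ipcHandle);
    }

    umfPoolFree(pool, ptr);
    return ret;
}
```

Each successful `umfGetIPCHandle` is balanced by one `umfPutIPCHandle` before the allocation is freed, which is what the `EXPECT_EQ(stat.putCount, stat.getCount)` check at the end of both helpers verifies.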