|
@@ -35,7 +35,6 @@
 // Forward declarations
 static void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool);
 static bool bucket_can_pool(bucket_t *bucket);
-static void bucket_decrement_pool(bucket_t *bucket);
 static slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket,
                                                bool *from_pool);
|
@@ -317,6 +316,7 @@ static void bucket_free_chunk(bucket_t *bucket, void *ptr, slab_t *slab, |
             assert(slab_it->val != NULL);
             pool_unregister_slab(bucket->pool, slab_it->val);
             DL_DELETE(bucket->available_slabs, slab_it);
+            assert(bucket->available_slabs_num > 0);
             bucket->available_slabs_num--;
             destroy_slab(slab_it->val);
         }
@@ -382,10 +382,17 @@ static slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket, |
         // Allocation from existing slab is treated as from pool for statistics.
         *from_pool = true;
         if (slab->num_chunks_allocated == 0) {
+            assert(bucket->chunked_slabs_in_pool > 0);
+            assert(bucket->shared_limits->total_size >=
+                   bucket_slab_alloc_size(bucket));
             // If this was an empty slab, it was in the pool.
             // Now it is no longer in the pool, so update count.
             --bucket->chunked_slabs_in_pool;
-            bucket_decrement_pool(bucket);
+            CACHE_ALIGNED size_t size_to_add =
+                -(long long)bucket_slab_alloc_size(bucket);
+            utils_fetch_and_add64(&bucket->shared_limits->total_size,
+                                  size_to_add);
+            bucket_update_stats(bucket, 1, -1);
         }
     }
 
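
Note on the inlined decrement above: utils_fetch_and_add64 only adds, so the slab size is subtracted by adding the two's-complement encoding of its negation and relying on unsigned wraparound. A minimal standalone sketch of that idiom, using C11 atomic_fetch_add in place of the project's utils_fetch_and_add64 wrapper (counter_sub and the values are illustrative, not UMF code):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdint.h>

    // Sketch: decrement an unsigned 64-bit counter via fetch-and-add.
    // Adding (uint64_t)(-(int64_t)delta) wraps modulo 2^64, which has the
    // same effect as subtracting delta.
    static uint64_t counter_sub(_Atomic uint64_t *counter, uint64_t delta) {
        uint64_t to_add = (uint64_t)(-(int64_t)delta);
        // Returns the value the counter held before the update.
        return atomic_fetch_add(counter, to_add);
    }

    int main(void) {
        _Atomic uint64_t total = 3 * 4096;
        uint64_t prev = counter_sub(&total, 4096); // pool shrinks by one slab
        assert(prev == 3 * 4096);
        assert(atomic_load(&total) == 2 * 4096);
        return 0;
    }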
|
@@ -421,36 +428,26 @@ static void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool) { |
         in_pool * bucket_slab_alloc_size(bucket);
 }
 
-static void bucket_decrement_pool(bucket_t *bucket) {
-    bucket_update_stats(bucket, 1, -1);
-    utils_fetch_and_add64(&bucket->shared_limits->total_size,
-                          -(long long)bucket_slab_alloc_size(bucket));
-}
-
 static bool bucket_can_pool(bucket_t *bucket) {
     size_t new_free_slabs_in_bucket;
 
     new_free_slabs_in_bucket = bucket->chunked_slabs_in_pool + 1;
 
     // we keep at most params.capacity slabs in the pool
     if (bucket_max_pooled_slabs(bucket) >= new_free_slabs_in_bucket) {
-        size_t pool_size = 0;
-        utils_atomic_load_acquire(&bucket->shared_limits->total_size,
-                                  &pool_size);
-        while (true) {
-            size_t new_pool_size = pool_size + bucket_slab_alloc_size(bucket);
-
-            if (bucket->shared_limits->max_size < new_pool_size) {
-                break;
-            }
-
-            if (utils_compare_exchange(&bucket->shared_limits->total_size,
-                                       &pool_size, &new_pool_size)) {
-                ++bucket->chunked_slabs_in_pool;
-
-                bucket_update_stats(bucket, -1, 1);
-                return true;
-            }
+
+        CACHE_ALIGNED size_t size_to_add = bucket_slab_alloc_size(bucket);
+        size_t previous_size = utils_fetch_and_add64(
+            &bucket->shared_limits->total_size, size_to_add);
+
+        if (previous_size + size_to_add <= bucket->shared_limits->max_size) {
+
+            ++bucket->chunked_slabs_in_pool;
+            bucket_update_stats(bucket, -1, 1);
+            return true;
+        } else {
+            utils_fetch_and_add64(&bucket->shared_limits->total_size,
+                                  -(long long)size_to_add);
         }
     }
 
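
The rewritten bucket_can_pool drops the load/compare-exchange retry loop in favor of one optimistic fetch-and-add that is rolled back if it overshoots the limit. A minimal sketch of that reserve-then-rollback pattern, written against C11 atomics rather than the utils_* wrappers; pool_limit_t and try_reserve are illustrative names, not part of UMF:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    // Illustrative stand-in for the shared pool limit (not the UMF struct).
    typedef struct {
        _Atomic uint64_t total_size; // bytes currently kept in the pool
        uint64_t max_size;           // configured upper bound
    } pool_limit_t;

    // Optimistically reserve `size` bytes with one fetch-and-add; if the
    // addition overshoots max_size, undo it with a second fetch-and-add.
    static bool try_reserve(pool_limit_t *limit, uint64_t size) {
        uint64_t previous = atomic_fetch_add(&limit->total_size, size);
        if (previous + size <= limit->max_size) {
            return true; // reservation fits under the limit
        }
        // Roll back: add the two's-complement negation, i.e. subtract size.
        atomic_fetch_add(&limit->total_size, (uint64_t)(-(int64_t)size));
        return false;
    }

Under that assumption, the trade-off is: the old CAS loop never exceeded max_size but could retry under contention, while the fetch-and-add path never retries but may transiently overshoot the limit (and can make a concurrent reservation fail spuriously) until the rollback lands.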
|
|