|
@@ -36,7 +36,6 @@
 // Forward declarations
 static void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool);
 static bool bucket_can_pool(bucket_t *bucket);
-static void bucket_decrement_pool(bucket_t *bucket);
 static slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket,
                                                bool *from_pool);

|
@@ -318,6 +317,7 @@ static void bucket_free_chunk(bucket_t *bucket, void *ptr, slab_t *slab,
         assert(slab_it->val != NULL);
         pool_unregister_slab(bucket->pool, slab_it->val);
         DL_DELETE(bucket->available_slabs, slab_it);
+        assert(bucket->available_slabs_num > 0);
         bucket->available_slabs_num--;
         destroy_slab(slab_it->val);
     }
@@ -383,10 +383,16 @@ static slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket,
         // Allocation from existing slab is treated as from pool for statistics.
         *from_pool = true;
         if (slab->num_chunks_allocated == 0) {
+            assert(bucket->chunked_slabs_in_pool > 0);
             // If this was an empty slab, it was in the pool.
             // Now it is no longer in the pool, so update count.
             --bucket->chunked_slabs_in_pool;
-            bucket_decrement_pool(bucket);
+            uint64_t size_to_sub = bucket_slab_alloc_size(bucket);
+            uint64_t old_size = utils_fetch_and_sub_u64(
+                &bucket->shared_limits->total_size, size_to_sub);
+            (void)old_size;
+            assert(old_size >= size_to_sub);
+            bucket_update_stats(bucket, 1, -1);
         }
     }

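A note on the helpers used above: the added asserts only hold if utils_fetch_and_sub_u64 (and its _add_ counterpart) return the counter's value from before the update, the way C11 atomic_fetch_sub does. Below is a minimal sketch of that assumed contract written directly against stdatomic; the _Atomic parameter type and the memory order are assumptions for illustration, not the project's actual implementation.

#include <stdatomic.h>
#include <stdint.h>

// Illustrative sketch only; the project's utils_* helpers may differ.
// Assumed contract: atomically update a 64-bit counter and return the
// value it held before the update (fetch-and-op semantics).
static inline uint64_t utils_fetch_and_add_u64(_Atomic uint64_t *counter,
                                               uint64_t val) {
    return atomic_fetch_add_explicit(counter, val, memory_order_acq_rel);
}

static inline uint64_t utils_fetch_and_sub_u64(_Atomic uint64_t *counter,
                                               uint64_t val) {
    return atomic_fetch_sub_explicit(counter, val, memory_order_acq_rel);
}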
|
@@ -422,36 +428,27 @@ static void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool) {
         in_pool * bucket_slab_alloc_size(bucket);
 }

-static void bucket_decrement_pool(bucket_t *bucket) {
-    bucket_update_stats(bucket, 1, -1);
-    utils_fetch_and_add64(&bucket->shared_limits->total_size,
-                          -(long long)bucket_slab_alloc_size(bucket));
-}
-
 static bool bucket_can_pool(bucket_t *bucket) {
     size_t new_free_slabs_in_bucket;

     new_free_slabs_in_bucket = bucket->chunked_slabs_in_pool + 1;

     // we keep at most params.capacity slabs in the pool
     if (bucket_max_pooled_slabs(bucket) >= new_free_slabs_in_bucket) {
-        size_t pool_size = 0;
-        utils_atomic_load_acquire(&bucket->shared_limits->total_size,
-                                  &pool_size);
-        while (true) {
-            size_t new_pool_size = pool_size + bucket_slab_alloc_size(bucket);
-
-            if (bucket->shared_limits->max_size < new_pool_size) {
-                break;
-            }
-
-            if (utils_compare_exchange(&bucket->shared_limits->total_size,
-                                       &pool_size, &new_pool_size)) {
-                ++bucket->chunked_slabs_in_pool;
-
-                bucket_update_stats(bucket, -1, 1);
-                return true;
-            }
+
+        uint64_t size_to_add = bucket_slab_alloc_size(bucket);
+        size_t previous_size = utils_fetch_and_add_u64(
+            &bucket->shared_limits->total_size, size_to_add);
+
+        if (previous_size + size_to_add <= bucket->shared_limits->max_size) {
+            ++bucket->chunked_slabs_in_pool;
+            bucket_update_stats(bucket, -1, 1);
+            return true;
+        } else {
+            uint64_t old = utils_fetch_and_sub_u64(
+                &bucket->shared_limits->total_size, size_to_add);
+            (void)old;
+            assert(old >= size_to_add);
         }
     }

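The rewritten bucket_can_pool drops the load-acquire plus compare-exchange retry loop in favour of an optimistic reserve-then-rollback scheme: add the slab size to the shared total unconditionally, and if the result exceeds max_size, subtract it back and refuse to pool. The following is a self-contained sketch of that pattern using C11 atomics; the standalone names such as try_reserve, total_size, and the example limit are illustrative assumptions, not the pool's actual code.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

// Shared accounting: bytes currently held in the pool and the ceiling.
static _Atomic uint64_t total_size;
static const uint64_t max_size = 1u << 20; // example limit: 1 MiB

// Attempt to reserve `size` bytes against the shared limit.
// Returns true if the reservation fits; otherwise rolls it back and returns false.
static bool try_reserve(uint64_t size) {
    // Optimistically claim the space; fetch_add returns the previous value.
    uint64_t previous = atomic_fetch_add(&total_size, size);
    if (previous + size <= max_size) {
        return true; // reservation stands
    }
    // Over the limit: undo the speculative add.
    uint64_t old = atomic_fetch_sub(&total_size, size);
    assert(old >= size); // what we just added must still be accounted for
    (void)old;
    return false;
}

Compared with the CAS loop, this avoids retries under contention, at the cost of the shared counter transiently overshooting max_size between the add and the rollback, which other threads may briefly observe.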
|
|