@@ -51,6 +51,7 @@ AllocationClass::AllocationClass(ClassId classId,
       allocationSize_(allocSize),
       slabAlloc_(s),
       freedAllocations_{slabAlloc_.createSingleTierPtrCompressor<FreeAlloc>()} {
+  curAllocatedSlabs_ = allocatedSlabs_.size();
   checkState();
 }
 
@@ -87,6 +88,12 @@ void AllocationClass::checkState() const {
         "Current allocation slab {} is not in allocated slabs list",
         currSlab_));
   }
+
+  if (curAllocatedSlabs_ != allocatedSlabs_.size()) {
+    throw std::invalid_argument(folly::sformat(
+        "Mismatch in allocated slabs numbers"
+    ));
+  }
 }
 
 // TODO(stuclar): Add poolId to the metadata to be serialized when cache shuts
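
The new curAllocatedSlabs_ member acts as an atomic shadow of allocatedSlabs_.size(), and checkState() now verifies that the two stay in sync. Below is a minimal sketch of this shadow-counter pattern, using hypothetical names (ShadowCounted, slabs, count) rather than the real members: writers mutate both the container and the counter under the lock, while readers may poll the counter lock-free for approximate stats.

```cpp
#include <atomic>
#include <cassert>
#include <cstddef>
#include <mutex>
#include <vector>

// Hypothetical stand-in: 'slabs' plays the role of allocatedSlabs_ and
// 'count' the role of curAllocatedSlabs_.
struct ShadowCounted {
  std::mutex lock;
  std::vector<void*> slabs;
  std::atomic<size_t> count{0};

  void add(void* s) {
    std::lock_guard<std::mutex> g(lock);
    count.fetch_add(1, std::memory_order_relaxed);
    slabs.push_back(s);
  }

  void remove() {
    std::lock_guard<std::mutex> g(lock);
    count.fetch_sub(1, std::memory_order_relaxed);
    slabs.pop_back();
  }

  // Mirrors the checkState() addition: under the lock, the shadow
  // counter must equal the container's size exactly.
  void check() {
    std::lock_guard<std::mutex> g(lock);
    assert(count.load(std::memory_order_relaxed) == slabs.size());
  }
};

int main() {
  ShadowCounted sc;
  sc.add(nullptr);
  sc.check(); // passes: count == slabs.size() == 1
  sc.remove();
  sc.check(); // passes: both back to 0
  return 0;
}
```
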
@@ -116,10 +123,12 @@ AllocationClass::AllocationClass(
     freeSlabs_.push_back(slabAlloc_.getSlabForIdx(freeSlabIdx));
   }
 
+  curAllocatedSlabs_ = allocatedSlabs_.size();
   checkState();
 }
 
 void AllocationClass::addSlabLocked(Slab* slab) {
+  curAllocatedSlabs_.fetch_add(1, std::memory_order_relaxed);
   canAllocate_ = true;
   auto header = slabAlloc_.getSlabHeader(slab);
   header->classId = classId_;
@@ -168,6 +177,7 @@ void* AllocationClass::allocateLocked() {
   }
 
   XDCHECK(canAllocate_);
+  curAllocatedSize_.fetch_add(getAllocSize(), std::memory_order_relaxed);
 
   // grab from the free list if possible.
   if (!freedAllocations_.empty()) {
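
Both counters are updated with std::memory_order_relaxed. That is presumably safe here because every write happens inside a *Locked method, so the allocation-class lock already serializes writers; relaxed ordering only means that lock-free readers, such as the stats path, may observe a slightly stale value, which is acceptable for an approximate metric.
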
@@ -270,6 +280,7 @@ SlabReleaseContext AllocationClass::startSlabRelease(
         slab, getId()));
   }
   *allocIt = allocatedSlabs_.back();
+  curAllocatedSlabs_.fetch_sub(1, std::memory_order_relaxed);
   allocatedSlabs_.pop_back();
 
   // if slab is being carved currently, then update slabReleaseAllocMap
@@ -511,6 +522,7 @@ void AllocationClass::abortSlabRelease(const SlabReleaseContext& context) {
   }
   slabReleaseAllocMap_.erase(slabPtrVal);
   allocatedSlabs_.push_back(const_cast<Slab*>(slab));
+  curAllocatedSlabs_.fetch_add(1, std::memory_order_relaxed);
   // restore the classId and allocSize
   header->classId = classId_;
   header->allocSize = allocationSize_;
@@ -661,6 +673,8 @@ void AllocationClass::free(void* memory) {
     freedAllocations_.insert(*reinterpret_cast<FreeAlloc*>(memory));
     canAllocate_ = true;
   });
+
+  curAllocatedSize_.fetch_sub(getAllocSize(), std::memory_order_relaxed);
 }
 
 serialization::AllocationClassObject AllocationClass::saveState() const {
@@ -699,8 +713,8 @@ ACStats AllocationClass::getStats() const {
     const unsigned long long nFreedAllocs = freedAllocations_.size();
     const unsigned long long nActiveAllocs =
         nSlabsAllocated * perSlab - nFreedAllocs - freeAllocsInCurrSlab;
-    return {allocationSize_, perSlab, nSlabsAllocated, freeSlabs_.size(),
-            nFreedAllocs, nActiveAllocs, isFull()};
+    return {allocationSize_, perSlab, 0, nSlabsAllocated, freeSlabs_.size(),
+            nFreedAllocs, nActiveAllocs, isFull(), util::RollingStats{}, 0.0};
   });
 }
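
The braced return relies on positional aggregate initialization of ACStats, so the struct's field order must match these initializers exactly: a new third field (written as 0 here) plus trailing util::RollingStats and double fields at the end. A hedged sketch with hypothetical structs (StatsV1, StatsV2, not the real ACStats) showing why inserting a field forces every call site to add an initializer in the right slot:

```cpp
#include <cstdint>

// Before: two positional slots.
struct StatsV1 {
  uint32_t allocSize;
  uint64_t usedSlabs;
};

// After: a field inserted in the middle shifts every later initializer,
// so call sites like the return above must be updated in lockstep.
struct StatsV2 {
  uint32_t allocSize;
  uint64_t totalSlabs; // new field
  uint64_t usedSlabs;
};

int main() {
  StatsV1 a{64, 10};    // fine
  StatsV2 b{64, 0, 10}; // third positional slot now needed, as in the diff
  (void)a;
  (void)b;
  return 0;
}
```
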
706720
@@ -723,3 +737,12 @@ std::vector<bool>& AllocationClass::getSlabReleaseAllocMapLocked(
723737 const auto slabPtrVal = getSlabPtrValue (slab);
724738 return slabReleaseAllocMap_.at (slabPtrVal);
725739}
740+
741+ double AllocationClass::approxFreePercentage () const {
742+ if (getNumSlabs () == 0 ) {
743+ return 100.0 ;
744+ }
745+
746+ return 100.0 - 100.0 * static_cast <double >(curAllocatedSize_.load (std::memory_order_relaxed)) /
747+ static_cast <double >(getNumSlabs () * Slab::kSize );
748+ }
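
A worked example of the formula, assuming Slab::kSize is 4 MB (its value in stock CacheLib): with 2 slabs and 3 MB of live allocations, the class is 100 - 100 * 3/8 = 62.5% free. A self-contained sketch:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // Assumed slab size; Slab::kSize is 4 MB in stock CacheLib.
  const uint64_t kSlabSize = 4ULL * 1024 * 1024;
  const uint64_t numSlabs = 2;                        // stand-in for getNumSlabs()
  const uint64_t allocatedBytes = 3ULL * 1024 * 1024; // stand-in for curAllocatedSize_
  const double freePct =
      100.0 - 100.0 * static_cast<double>(allocatedBytes) /
                  static_cast<double>(numSlabs * kSlabSize);
  std::printf("%.1f\n", freePct); // prints 62.5 (percent free)
  return 0;
}
```
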