Skip to content

Commit 17e49fb

Browse files
byrnedjigchor
and committed
Combined locking with exclusive bit (#38) -
Based on cs part 2 upstream patch. - Use markForEviction only for eviction (findEviction and evictForSlabRelease) but not for item movement. moveForSlabRelease relies on markMoving(). Only allow to mark item as exclusive if ref count is 0. This ensures that after item is marked eviction cannot fail. This makes it possible to return NULL handle immediately from find if item is marked as exclusive. markMoving() does have those restrictions and still allows readers to obtain a handle to a moving item. Also, add option to use combined locking for MMContainer iteration. - Block readers when item is moving This simplifies moving code a lot. We don't need to worry about any failure other than allocation failure since item is protected from any readers (and by extension, removes). Co-authored-by: Igor Chorążewicz <igor.chorazewicz@intel.com>
1 parent ffd1be8 commit 17e49fb

File tree

16 files changed

+384
-321
lines changed

16 files changed

+384
-321
lines changed

cachelib/allocator/CacheAllocator-inl.h

Lines changed: 176 additions & 142 deletions
Large diffs are not rendered by default.

cachelib/allocator/CacheAllocator.h

Lines changed: 9 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1321,7 +1321,7 @@ class CacheAllocator : public CacheBase {
13211321

13221322
private:
13231323
// wrapper around Item's refcount and active handle tracking
1324-
FOLLY_ALWAYS_INLINE bool incRef(Item& it);
1324+
FOLLY_ALWAYS_INLINE RefcountWithFlags::incResult incRef(Item& it, bool failIfMoving);
13251325
FOLLY_ALWAYS_INLINE RefcountWithFlags::Value decRef(Item& it);
13261326

13271327
// drops the refcount and if needed, frees the allocation back to the memory
@@ -1552,8 +1552,7 @@ class CacheAllocator : public CacheBase {
15521552
//
15531553
// @return true If the move was completed, and the containers were updated
15541554
// successfully.
1555-
template <typename ItemPtr>
1556-
WriteHandle moveRegularItemOnEviction(ItemPtr& oldItem, WriteHandle& newItemHdl);
1555+
void moveRegularItemWithSync(Item& oldItem, WriteHandle& newItemHdl);
15571556

15581557
// Moves a regular item to a different slab. This should only be used during
15591558
// slab release after the item's exclusive bit has been set. The user supplied
@@ -1722,16 +1721,15 @@ class CacheAllocator : public CacheBase {
17221721
//
17231722
// @return valid handle to the item. This will be the last
17241723
// handle to the item. On failure an empty handle.
1725-
template <typename ItemPtr>
1726-
WriteHandle tryEvictToNextMemoryTier(TierId tid, PoolId pid, ItemPtr& item);
1724+
WriteHandle tryEvictToNextMemoryTier(TierId tid, PoolId pid, Item& item);
17271725

17281726
// Try to move the item down to the next memory tier
17291727
//
17301728
// @param item the item to evict
17311729
//
17321730
// @return valid handle to the item. This will be the last
17331731
// handle to the item. On failure an empty handle.
1734-
WriteHandle tryEvictToNextMemoryTier(Item* item);
1732+
WriteHandle tryEvictToNextMemoryTier(Item& item);
17351733

17361734
// Deserializer CacheAllocatorMetadata and verify the version
17371735
//
@@ -1988,10 +1986,6 @@ class CacheAllocator : public CacheBase {
19881986
static bool itemExpiryPredicate(const Item& item) {
19891987
return item.getRefCount() == 1 && item.isExpired();
19901988
}
1991-
1992-
static bool itemExclusivePredicate(const Item& item) {
1993-
return item.getRefCount() == 0;
1994-
}
19951989

19961990
std::unique_ptr<Deserializer> createDeserializer();
19971991

@@ -2044,8 +2038,9 @@ class CacheAllocator : public CacheBase {
20442038

20452039
size_t memoryTierSize(TierId tid) const;
20462040

2047-
bool addWaitContextForMovingItem(
2048-
folly::StringPiece key, std::shared_ptr<WaitContext<ReadHandle>> waiter);
2041+
WriteHandle handleWithWaitContextForMovingItem(Item& item);
2042+
2043+
size_t wakeUpWaiters(folly::StringPiece key, WriteHandle&& handle);
20492044

20502045
class MoveCtx {
20512046
public:
@@ -2070,6 +2065,8 @@ class CacheAllocator : public CacheBase {
20702065
waiters.push_back(std::move(waiter));
20712066
}
20722067

2068+
size_t numWaiters() const { return waiters.size(); }
2069+
20732070
private:
20742071
// notify all pending waiters that are waiting for the fetch.
20752072
void wakeUpWaiters() {

cachelib/allocator/CacheItem-inl.h

Lines changed: 2 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -238,8 +238,8 @@ bool CacheItem<CacheTrait>::markForEvictionWhenMoving() {
238238
}
239239

240240
template <typename CacheTrait>
241-
bool CacheItem<CacheTrait>::markMoving() {
242-
return ref_.markMoving();
241+
bool CacheItem<CacheTrait>::markMoving(bool failIfRefNotZero) {
242+
return ref_.markMoving(failIfRefNotZero);
243243
}
244244

245245
template <typename CacheTrait>
@@ -287,21 +287,6 @@ bool CacheItem<CacheTrait>::isNvmEvicted() const noexcept {
287287
return ref_.isNvmEvicted();
288288
}
289289

290-
template <typename CacheTrait>
291-
void CacheItem<CacheTrait>::markIncomplete() noexcept {
292-
ref_.markIncomplete();
293-
}
294-
295-
template <typename CacheTrait>
296-
void CacheItem<CacheTrait>::unmarkIncomplete() noexcept {
297-
ref_.unmarkIncomplete();
298-
}
299-
300-
template <typename CacheTrait>
301-
bool CacheItem<CacheTrait>::isIncomplete() const noexcept {
302-
return ref_.isIncomplete();
303-
}
304-
305290
template <typename CacheTrait>
306291
void CacheItem<CacheTrait>::markIsChainedItem() noexcept {
307292
XDCHECK(!hasChainedItem());

cachelib/allocator/CacheItem.h

Lines changed: 3 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -241,14 +241,6 @@ class CACHELIB_PACKED_ATTR CacheItem {
241241
void unmarkNvmEvicted() noexcept;
242242
bool isNvmEvicted() const noexcept;
243243

244-
/**
245-
* Marks that the item is migrating between memory tiers and
246-
* not ready for access now. Accessing thread should wait.
247-
*/
248-
void markIncomplete() noexcept;
249-
void unmarkIncomplete() noexcept;
250-
bool isIncomplete() const noexcept;
251-
252244
/**
253245
* Function to set the timestamp for when to expire an item
254246
*
@@ -317,9 +309,9 @@ class CACHELIB_PACKED_ATTR CacheItem {
317309
//
318310
// @return true on success, failure if item is marked as exclusive
319311
// @throw exception::RefcountOverflow on ref count overflow
320-
FOLLY_ALWAYS_INLINE bool incRef() {
312+
FOLLY_ALWAYS_INLINE RefcountWithFlags::incResult incRef(bool failIfMoving) {
321313
try {
322-
return ref_.incRef();
314+
return ref_.incRef(failIfMoving);
323315
} catch (exception::RefcountOverflow& e) {
324316
throw exception::RefcountOverflow(
325317
folly::sformat("{} item: {}", e.what(), toString()));
@@ -386,7 +378,7 @@ class CACHELIB_PACKED_ATTR CacheItem {
386378
* Unmarking moving will also return the refcount at the moment of
387379
* unmarking.
388380
*/
389-
bool markMoving();
381+
bool markMoving(bool failIfRefNotZero);
390382
RefcountWithFlags::Value unmarkMoving() noexcept;
391383
bool isMoving() const noexcept;
392384
bool isOnlyMoving() const noexcept;

cachelib/allocator/Handle.h

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -488,13 +488,6 @@ struct ReadHandleImpl {
488488
// Handle which has the item already
489489
FOLLY_ALWAYS_INLINE ReadHandleImpl(Item* it, CacheT& alloc) noexcept
490490
: alloc_(&alloc), it_(it) {
491-
if (it_ && it_->isIncomplete()) {
492-
waitContext_ = std::make_shared<ItemWaitContext>(alloc);
493-
if (!alloc_->addWaitContextForMovingItem(it->getKey(), waitContext_)) {
494-
waitContext_->discard();
495-
waitContext_.reset();
496-
}
497-
}
498491
}
499492

500493
// handle that has a wait context allocated. Used for async handles

cachelib/allocator/MMLru-inl.h

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -229,6 +229,18 @@ void MMLru::Container<T, HookPtr>::withEvictionIterator(F&& fun) {
229229
}
230230
}
231231

232+
//template <typename T, MMLru::Hook<T> T::*HookPtr>
233+
//template <typename F>
234+
//void
235+
//MMLru::Container<T, HookPtr>::withPromotionIterator(F&& fun) {
236+
// if (config_.useCombinedLockForIterators) {
237+
// lruMutex_->lock_combine([this, &fun]() { fun(Iterator{lru_.begin()}); });
238+
// } else {
239+
// LockHolder lck{*lruMutex_};
240+
// fun(Iterator{lru_.begin()});
241+
// }
242+
//}
243+
232244
template <typename T, MMLru::Hook<T> T::*HookPtr>
233245
void MMLru::Container<T, HookPtr>::ensureNotInsertionPoint(T& node) noexcept {
234246
// If we are removing the insertion point node, grow tail before we remove

cachelib/allocator/MMLru.h

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -230,12 +230,13 @@ class MMLru {
230230
// lruInsertionPointSpec = 2, we insert at a point 1/4th from tail
231231
uint8_t lruInsertionPointSpec{0};
232232

233+
// Whether to use combined locking for withEvictionIterator.
234+
bool useCombinedLockForIterators{true};
235+
233236
// Minimum interval between reconfigurations. If 0, reconfigure is never
234237
// called.
235238
std::chrono::seconds mmReconfigureIntervalSecs{};
236239

237-
// Whether to use combined locking for withEvictionIterator.
238-
bool useCombinedLockForIterators{false};
239240
};
240241

241242
// The container object which can be used to keep track of objects of type

cachelib/allocator/Refcount.h

Lines changed: 41 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -116,10 +116,6 @@ class FOLLY_PACK_ATTR RefcountWithFlags {
116116
// unevictable in the past.
117117
kUnevictable_NOOP,
118118

119-
// Item is accecible but content is not ready yet. Used by eviction
120-
// when Item is moved between memory tiers.
121-
kIncomplete,
122-
123119
// Unused. This is just to indciate the maximum number of flags
124120
kFlagMax,
125121
};
@@ -134,30 +130,47 @@ class FOLLY_PACK_ATTR RefcountWithFlags {
134130
RefcountWithFlags& operator=(const RefcountWithFlags&) = delete;
135131
RefcountWithFlags(RefcountWithFlags&&) = delete;
136132
RefcountWithFlags& operator=(RefcountWithFlags&&) = delete;
137-
133+
enum incResult {
134+
incOk,
135+
incFailedMoving,
136+
incFailedEviction
137+
};
138138
// Bumps up the reference count only if the new count will be strictly less
139139
// than or equal to the maxCount and the item is not exclusive
140140
// @return true if refcount is bumped. false otherwise (if item is exclusive)
141141
// @throw exception::RefcountOverflow if new count would be greater than
142142
// maxCount
143-
FOLLY_ALWAYS_INLINE bool incRef() {
144-
auto predicate = [](const Value curValue) {
145-
Value bitMask = getAdminRef<kExclusive>();
146-
147-
const bool exlusiveBitIsSet = curValue & bitMask;
148-
if (UNLIKELY((curValue & kAccessRefMask) == (kAccessRefMask))) {
149-
throw exception::RefcountOverflow("Refcount maxed out.");
150-
}
151-
152-
// Check if the item is not marked for eviction
153-
return !exlusiveBitIsSet || ((curValue & kAccessRefMask) != 0);
154-
};
155-
156-
auto newValue = [](const Value curValue) {
157-
return (curValue + static_cast<Value>(1));
158-
};
159-
160-
return atomicUpdateValue(predicate, newValue);
143+
FOLLY_ALWAYS_INLINE incResult incRef(bool failIfMoving) {
144+
auto predicate = [failIfMoving](const Value curValue) {
145+
Value bitMask = getAdminRef<kExclusive>();
146+
147+
const bool exlusiveBitIsSet = curValue & bitMask;
148+
if (UNLIKELY((curValue & kAccessRefMask) == (kAccessRefMask))) {
149+
throw exception::RefcountOverflow("Refcount maxed out.");
150+
} else if (exlusiveBitIsSet && (curValue & kAccessRefMask) == 0) {
151+
return false;
152+
} else if (exlusiveBitIsSet && failIfMoving) {
153+
return false;
154+
}
155+
return true;
156+
};
157+
158+
auto newValue = [](const Value curValue) {
159+
return (curValue + static_cast<Value>(1));
160+
};
161+
162+
if (atomicUpdateValue(predicate, newValue)) {
163+
return incOk;
164+
}
165+
Value val = __atomic_load_n(&refCount_, __ATOMIC_RELAXED);
166+
Value bitMask = getAdminRef<kExclusive>();
167+
const bool exlusiveBitIsSet = val & bitMask;
168+
if (exlusiveBitIsSet & (val & kAccessRefMask) == 0) {
169+
return incFailedEviction;
170+
} else if (exlusiveBitIsSet) {
171+
return incFailedMoving;
172+
}
173+
return incFailedEviction;
161174
}
162175

163176
// Bumps down the reference count
@@ -313,12 +326,14 @@ class FOLLY_PACK_ATTR RefcountWithFlags {
313326
*
314327
* Unmarking moving does not depend on `isInMMContainer`
315328
*/
316-
bool markMoving() {
317-
auto predicate = [](const Value curValue) {
329+
bool markMoving(bool failIfRefNotZero) {
330+
auto predicate = [failIfRefNotZero](const Value curValue) {
318331
Value conditionBitMask = getAdminRef<kLinked>();
319332
const bool flagSet = curValue & conditionBitMask;
320333
const bool alreadyExclusive = curValue & getAdminRef<kExclusive>();
321-
334+
if (failIfRefNotZero && (curValue & kAccessRefMask) != 0) {
335+
return false;
336+
}
322337
if (!flagSet || alreadyExclusive) {
323338
return false;
324339
}
@@ -419,14 +434,6 @@ class FOLLY_PACK_ATTR RefcountWithFlags {
419434
void unmarkNvmEvicted() noexcept { return unSetFlag<kNvmEvicted>(); }
420435
bool isNvmEvicted() const noexcept { return isFlagSet<kNvmEvicted>(); }
421436

422-
/**
423-
* Marks that the item is migrating between memory tiers and
424-
* not ready for access now. Accessing thread should wait.
425-
*/
426-
void markIncomplete() noexcept { return setFlag<kIncomplete>(); }
427-
void unmarkIncomplete() noexcept { return unSetFlag<kIncomplete>(); }
428-
bool isIncomplete() const noexcept { return isFlagSet<kIncomplete>(); }
429-
430437
// Whether or not an item is completely drained of access
431438
// Refcount is 0 and the item is not linked, accessible, nor exclusive
432439
bool isDrained() const noexcept { return getRefWithAccessAndAdmin() == 0; }

cachelib/allocator/tests/ItemHandleTest.cpp

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -41,8 +41,6 @@ struct TestItem {
4141
void reset() {}
4242

4343
folly::StringPiece getKey() const { return folly::StringPiece(); }
44-
45-
bool isIncomplete() const { return false; }
4644
};
4745

4846
struct TestNvmCache;

cachelib/allocator/tests/ItemTest.cpp

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -82,8 +82,10 @@ TEST(ItemTest, ExpiryTime) {
8282
EXPECT_TRUE(result);
8383
EXPECT_EQ(tenMins, item->getConfiguredTTL());
8484

85+
// So that exclusive bit will be set
86+
item->markAccessible();
8587
// Test that writes fail while the item is moving
86-
result = item->markMoving();
88+
result = item->markMoving(true);
8789
EXPECT_TRUE(result);
8890
result = item->updateExpiryTime(0);
8991
EXPECT_FALSE(result);

0 commit comments

Comments
 (0)