Skip to content

Commit 4dae1be

Browse files
authored
fast naming convention (#14)
1 parent 0fa8070 commit 4dae1be

File tree

6 files changed

+35
-2
lines changed

6 files changed

+35
-2
lines changed

Lightweight.Caching/Lru/ConcurrentLru.cs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,5 +12,7 @@ public ConcurrentLru(int concurrencyLevel, int capacity, IEqualityComparer<K> co
1212
: base(concurrencyLevel, capacity, comparer, new LruPolicy<K, V>(), new HitCounter())
1313
{
1414
}
15+
16+
public double HitRatio => this.hitCounter.HitRatio;
1517
}
1618
}

Lightweight.Caching/Lru/ConcurrentLruTemplate.cs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ public class ConcurrentLruTemplate<K, V, I, P, H> : ICache<K, V>
3232

3333
// Since H is a struct, making it readonly will force the runtime to make defensive copies
3434
// if mutate methods are called. Therefore, field must be mutable to maintain count.
35-
private H hitCounter;
35+
protected H hitCounter;
3636

3737
public ConcurrentLruTemplate(
3838
int concurrencyLevel,
@@ -58,7 +58,7 @@ public ConcurrentLruTemplate(
5858

5959
public int Count => this.hotCount + this.warmCount + this.coldCount;
6060

61-
public double HitRatio => this.hitCounter.HitRatio;
61+
6262

6363
public int HotCount => this.hotCount;
6464

Lightweight.Caching/Lru/ConcurrentTLru.cs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,5 +12,7 @@ public ConcurrentTLru(int concurrencyLevel, int capacity, IEqualityComparer<K> c
1212
: base(concurrencyLevel, capacity, comparer, new TlruPolicy<K, V>(timeToLive), new HitCounter())
1313
{
1414
}
15+
16+
public double HitRatio => this.hitCounter.HitRatio;
1517
}
1618
}
Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
using System;
2+
using System.Collections.Generic;
3+
using System.Text;
4+
5+
namespace Lightweight.Caching.Lru
6+
{
7+
public sealed class FastConcurrentLru<K, V> : ConcurrentLruTemplate<K, V, LruItem<K, V>, LruPolicy<K, V>, NullHitCounter>
8+
{
9+
public FastConcurrentLru(int concurrencyLevel, int capacity, IEqualityComparer<K> comparer)
10+
: base(concurrencyLevel, capacity, comparer, new LruPolicy<K, V>(), new NullHitCounter())
11+
{
12+
}
13+
}
14+
}
Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
using System;
2+
using System.Collections.Generic;
3+
using System.Text;
4+
5+
namespace Lightweight.Caching.Lru
6+
{
7+
public class FastConcurrentTLru<K, V> : ConcurrentLruTemplate<K, V, TimeStampedLruItem<K, V>, TlruPolicy<K, V>, NullHitCounter>
8+
{
9+
public FastConcurrentTLru(int concurrencyLevel, int capacity, IEqualityComparer<K> comparer, TimeSpan timeToLive)
10+
: base(concurrencyLevel, capacity, comparer, new TlruPolicy<K, V>(timeToLive), new NullHitCounter())
11+
{
12+
}
13+
}
14+
}

README.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@ LRU implementations are intended as an alternative to the System.Runtime.Caching
1313
| ClassicLru | Bounded size LRU based with strict ordering.<br><br>Use if ordering is important, but data structures are synchronized with a lock which limits scalability. |
1414
| ConcurrentLru | Bounded size pseudo LRU.<br><br>For when you want a ConcurrentDictionary, but with bounded size. Maintains psuedo order, but is faster than ClassicLru and not prone to lock contention. |
1515
| ConcurrentTlru | Bounded size pseudo LRU, items have TTL.<br><br>Same as ConcurrentLru, but with a [time aware least recently used (TLRU)](https://en.wikipedia.org/wiki/Cache_replacement_policies#Time_aware_least_recently_used_(TLRU)) eviction policy. |
16+
| FastConcurrentLru/FastConcurrentTLru | Same as ConcurrentLru/ConcurrentTLru, but with hit counting logic eliminated making them about 10% faster. |
1617
| SingletonCache | Cache singletons by key. Discard when not in use. <br><br> Cache a semaphore per user, where user population is large, but active user count is low. |
1718

1819
# Performance

0 commit comments

Comments
 (0)