@@ -1,7 +1,7 @@
 use oorandom::Rand64;
 use parking_lot::Mutex;
 use std::fmt::Debug;
-use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::AtomicU16;
 use std::sync::atomic::Ordering;
 use triomphe::Arc;
 
@@ -20,15 +20,15 @@ pub(crate) struct Lru<Node>
 where
     Node: LruNode,
 {
-    green_zone: AtomicUsize,
+    green_zone: AtomicU16,
     data: Mutex<LruData<Node>>,
 }
 
 #[derive(Debug)]
 struct LruData<Node> {
-    end_red_zone: usize,
-    end_yellow_zone: usize,
-    end_green_zone: usize,
+    end_red_zone: u16,
+    end_yellow_zone: u16,
+    end_green_zone: u16,
     rng: Rand64,
     entries: Vec<Arc<Node>>,
 }
@@ -39,9 +39,9 @@ pub(crate) trait LruNode: Sized + Debug {
 
 #[derive(Debug)]
 pub(crate) struct LruIndex {
-    /// Index in the appropriate LRU list, or std::usize::MAX if not a
+    /// Index in the appropriate LRU list, or std::u16::MAX if not a
     /// member.
-    index: AtomicUsize,
+    index: AtomicU16,
 }
 
 impl<Node> Default for Lru<Node>
@@ -68,12 +68,12 @@ where
 
     #[cfg_attr(not(test), allow(dead_code))]
     fn with_seed(seed: &str) -> Self {
-        Lru { green_zone: AtomicUsize::new(0), data: Mutex::new(LruData::with_seed(seed)) }
+        Lru { green_zone: AtomicU16::new(0), data: Mutex::new(LruData::with_seed(seed)) }
     }
 
     /// Adjust the total number of nodes permitted to have a value at
     /// once. If `len` is zero, this disables LRU caching completely.
-    pub(crate) fn set_lru_capacity(&self, len: usize) {
+    pub(crate) fn set_lru_capacity(&self, len: u16) {
         let mut data = self.data.lock();
 
         // We require each zone to have at least 1 slot. Therefore,
@@ -143,23 +143,24 @@ where
         LruData { end_yellow_zone: 0, end_green_zone: 0, end_red_zone: 0, entries: Vec::new(), rng }
     }
 
-    fn green_zone(&self) -> std::ops::Range<usize> {
+    fn green_zone(&self) -> std::ops::Range<u16> {
         0..self.end_green_zone
     }
 
-    fn yellow_zone(&self) -> std::ops::Range<usize> {
+    fn yellow_zone(&self) -> std::ops::Range<u16> {
         self.end_green_zone..self.end_yellow_zone
     }
 
-    fn red_zone(&self) -> std::ops::Range<usize> {
+    fn red_zone(&self) -> std::ops::Range<u16> {
         self.end_yellow_zone..self.end_red_zone
     }
 
-    fn resize(&mut self, len_green_zone: usize, len_yellow_zone: usize, len_red_zone: usize) {
+    fn resize(&mut self, len_green_zone: u16, len_yellow_zone: u16, len_red_zone: u16) {
         self.end_green_zone = len_green_zone;
         self.end_yellow_zone = self.end_green_zone + len_yellow_zone;
         self.end_red_zone = self.end_yellow_zone + len_red_zone;
-        let entries = std::mem::replace(&mut self.entries, Vec::with_capacity(self.end_red_zone));
+        let entries =
+            std::mem::replace(&mut self.entries, Vec::with_capacity(self.end_red_zone as usize));
 
         tracing::debug!("green_zone = {:?}", self.green_zone());
         tracing::debug!("yellow_zone = {:?}", self.yellow_zone());
@@ -207,7 +208,7 @@ where
 
         // Easy case: we still have capacity. Push it, and then promote
         // it up to the appropriate zone.
-        let len = self.entries.len();
+        let len = self.entries.len() as u16;
         if len < self.end_red_zone {
             self.entries.push(node.clone());
             node.lru_index().store(len);
@@ -218,7 +219,7 @@ where
         // Harder case: no capacity. Create some by evicting somebody from red
         // zone and then promoting.
         let victim_index = self.pick_index(self.red_zone());
-        let victim_node = std::mem::replace(&mut self.entries[victim_index], node.clone());
+        let victim_node = std::mem::replace(&mut self.entries[victim_index as usize], node.clone());
         tracing::debug!("evicting red node {:?} from {}", victim_node, victim_index);
         victim_node.lru_index().clear();
         self.promote_red_to_green(node, victim_index);
@@ -231,7 +232,7 @@ where
     ///
     /// NB: It is not required that `node.lru_index()` is up-to-date
     /// when entering this method.
-    fn promote_red_to_green(&mut self, node: &Arc<Node>, red_index: usize) {
+    fn promote_red_to_green(&mut self, node: &Arc<Node>, red_index: u16) {
         debug_assert!(self.red_zone().contains(&red_index));
 
         // Pick a yellow at random and switch places with it.
@@ -242,12 +243,12 @@ where
         let yellow_index = self.pick_index(self.yellow_zone());
         tracing::debug!(
             "demoting yellow node {:?} from {} to red at {}",
-            self.entries[yellow_index],
+            self.entries[yellow_index as usize],
             yellow_index,
             red_index,
         );
-        self.entries.swap(yellow_index, red_index);
-        self.entries[red_index].lru_index().store(red_index);
+        self.entries.swap(yellow_index as usize, red_index as usize);
+        self.entries[red_index as usize].lru_index().store(red_index);
 
         // Now move ourselves up into the green zone.
         self.promote_yellow_to_green(node, yellow_index);
@@ -259,51 +260,51 @@ where
     ///
     /// NB: It is not required that `node.lru_index()` is up-to-date
     /// when entering this method.
-    fn promote_yellow_to_green(&mut self, node: &Arc<Node>, yellow_index: usize) {
+    fn promote_yellow_to_green(&mut self, node: &Arc<Node>, yellow_index: u16) {
         debug_assert!(self.yellow_zone().contains(&yellow_index));
 
         // Pick a yellow at random and switch places with it.
         let green_index = self.pick_index(self.green_zone());
         tracing::debug!(
             "demoting green node {:?} from {} to yellow at {}",
-            self.entries[green_index],
+            self.entries[green_index as usize],
             green_index,
             yellow_index
         );
-        self.entries.swap(green_index, yellow_index);
-        self.entries[yellow_index].lru_index().store(yellow_index);
+        self.entries.swap(green_index as usize, yellow_index as usize);
+        self.entries[yellow_index as usize].lru_index().store(yellow_index);
         node.lru_index().store(green_index);
 
         tracing::debug!("promoted {:?} to green index {}", node, green_index);
     }
 
-    fn pick_index(&mut self, zone: std::ops::Range<usize>) -> usize {
-        let end_index = std::cmp::min(zone.end, self.entries.len());
-        self.rng.rand_range(zone.start as u64..end_index as u64) as usize
+    fn pick_index(&mut self, zone: std::ops::Range<u16>) -> u16 {
+        let end_index = std::cmp::min(zone.end, self.entries.len() as u16);
+        self.rng.rand_range(zone.start as u64..end_index as u64) as u16
     }
 }
 
 impl Default for LruIndex {
     fn default() -> Self {
-        Self { index: AtomicUsize::new(usize::MAX) }
+        Self { index: AtomicU16::new(u16::MAX) }
     }
 }
 
 impl LruIndex {
-    fn load(&self) -> usize {
+    fn load(&self) -> u16 {
         self.index.load(Ordering::Acquire) // see note on ordering below
     }
 
-    fn store(&self, value: usize) {
+    fn store(&self, value: u16) {
         self.index.store(value, Ordering::Release) // see note on ordering below
     }
 
     fn clear(&self) {
-        self.store(usize::MAX);
+        self.store(u16::MAX);
     }
 
     fn is_in_lru(&self) -> bool {
-        self.load() != usize::MAX
+        self.load() != u16::MAX
     }
 }
 
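The change narrows the LRU bookkeeping from `AtomicUsize`/`usize::MAX` to `AtomicU16`/`u16::MAX`, keeping `u16::MAX` as the "not a member of any LRU list" sentinel; since `Vec` indexing still requires `usize`, every indexing site gains an `as usize` cast, and the capacity is implicitly capped just below 2^16 entries. A minimal standalone sketch of that sentinel-index pattern is below; the `SlotIndex` name and the `main` driver are illustrative only and not part of the crate.

```rust
use std::sync::atomic::{AtomicU16, Ordering};

/// Illustrative sketch only: mirrors the sentinel pattern in the diff,
/// where `u16::MAX` means "not currently in the LRU".
struct SlotIndex {
    index: AtomicU16,
}

impl SlotIndex {
    fn new() -> Self {
        // Start out "not in the LRU".
        SlotIndex { index: AtomicU16::new(u16::MAX) }
    }

    fn store(&self, value: u16) {
        self.index.store(value, Ordering::Release)
    }

    fn load(&self) -> u16 {
        self.index.load(Ordering::Acquire)
    }

    fn is_in_lru(&self) -> bool {
        self.load() != u16::MAX
    }
}

fn main() {
    let entries = vec!["a", "b", "c"];
    let slot = SlotIndex::new();
    assert!(!slot.is_in_lru());

    slot.store(2);
    // A u16 index must be widened to usize before indexing a Vec,
    // which is why the diff adds `as usize` at each indexing site.
    assert_eq!(entries[slot.load() as usize], "c");
}
```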