@@ -8,13 +8,17 @@ use rustc_data_structures::sharded::Sharded;
 #[cfg(not(parallel_compiler))]
 use rustc_data_structures::sync::Lock;
 use rustc_data_structures::sync::WorkerLocal;
+use rustc_index::vec::{Idx, IndexVec};
 use std::default::Default;
 use std::fmt::Debug;
 use std::hash::Hash;
 use std::marker::PhantomData;
 
-pub trait CacheSelector<K, V> {
-    type Cache;
+pub trait CacheSelector<'tcx, V> {
+    type Cache
+    where
+        V: Clone;
+    type ArenaCache;
 }
 
 pub trait QueryStorage {
@@ -47,10 +51,13 @@ pub trait QueryCache: QueryStorage + Sized {
     fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex));
 }
 
-pub struct DefaultCacheSelector;
+pub struct DefaultCacheSelector<K>(PhantomData<K>);
 
-impl<K: Eq + Hash, V: Clone> CacheSelector<K, V> for DefaultCacheSelector {
-    type Cache = DefaultCache<K, V>;
+impl<'tcx, K: Eq + Hash, V: 'tcx> CacheSelector<'tcx, V> for DefaultCacheSelector<K> {
+    type Cache = DefaultCache<K, V>
+    where
+        V: Clone;
+    type ArenaCache = ArenaCache<'tcx, K, V>;
 }
 
 pub struct DefaultCache<K, V> {
@@ -134,12 +141,6 @@ where
     }
 }
 
-pub struct ArenaCacheSelector<'tcx>(PhantomData<&'tcx ()>);
-
-impl<'tcx, K: Eq + Hash, V: 'tcx> CacheSelector<K, V> for ArenaCacheSelector<'tcx> {
-    type Cache = ArenaCache<'tcx, K, V>;
-}
-
 pub struct ArenaCache<'tcx, K, V> {
     arena: WorkerLocal<TypedArena<(V, DepNodeIndex)>>,
     #[cfg(parallel_compiler)]
@@ -224,3 +225,183 @@ where
         }
     }
 }
+
+pub struct VecCacheSelector<K>(PhantomData<K>);
+
+impl<'tcx, K: Idx, V: 'tcx> CacheSelector<'tcx, V> for VecCacheSelector<K> {
+    type Cache = VecCache<K, V>
+    where
+        V: Clone;
+    type ArenaCache = VecArenaCache<'tcx, K, V>;
+}
+
+pub struct VecCache<K: Idx, V> {
+    #[cfg(parallel_compiler)]
+    cache: Sharded<IndexVec<K, Option<(V, DepNodeIndex)>>>,
+    #[cfg(not(parallel_compiler))]
+    cache: Lock<IndexVec<K, Option<(V, DepNodeIndex)>>>,
+}
+
+impl<K: Idx, V> Default for VecCache<K, V> {
+    fn default() -> Self {
+        VecCache { cache: Default::default() }
+    }
+}
+
+impl<K: Eq + Idx, V: Clone + Debug> QueryStorage for VecCache<K, V> {
+    type Value = V;
+    type Stored = V;
+
+    #[inline]
+    fn store_nocache(&self, value: Self::Value) -> Self::Stored {
+        // We have no dedicated storage
+        value
+    }
+}
+
+impl<K, V> QueryCache for VecCache<K, V>
+where
+    K: Eq + Idx + Clone + Debug,
+    V: Clone + Debug,
+{
+    type Key = K;
+
+    #[inline(always)]
+    fn lookup<R, OnHit>(&self, key: &K, on_hit: OnHit) -> Result<R, ()>
+    where
+        OnHit: FnOnce(&V, DepNodeIndex) -> R,
+    {
+        #[cfg(parallel_compiler)]
+        let lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
+        #[cfg(not(parallel_compiler))]
+        let lock = self.cache.lock();
+        if let Some(Some(value)) = lock.get(*key) {
+            let hit_result = on_hit(&value.0, value.1);
+            Ok(hit_result)
+        } else {
+            Err(())
+        }
+    }
+
+    #[inline]
+    fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
+        #[cfg(parallel_compiler)]
+        let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
+        #[cfg(not(parallel_compiler))]
+        let mut lock = self.cache.lock();
+        lock.insert(key, (value.clone(), index));
+        value
+    }
+
+    fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
+        #[cfg(parallel_compiler)]
+        {
+            let shards = self.cache.lock_shards();
+            for shard in shards.iter() {
+                for (k, v) in shard.iter_enumerated() {
+                    if let Some(v) = v {
+                        f(&k, &v.0, v.1);
+                    }
+                }
+            }
+        }
+        #[cfg(not(parallel_compiler))]
+        {
+            let map = self.cache.lock();
+            for (k, v) in map.iter_enumerated() {
+                if let Some(v) = v {
+                    f(&k, &v.0, v.1);
+                }
+            }
+        }
+    }
+}
+
+pub struct VecArenaCache<'tcx, K: Idx, V> {
+    arena: WorkerLocal<TypedArena<(V, DepNodeIndex)>>,
+    #[cfg(parallel_compiler)]
+    cache: Sharded<IndexVec<K, Option<&'tcx (V, DepNodeIndex)>>>,
+    #[cfg(not(parallel_compiler))]
+    cache: Lock<IndexVec<K, Option<&'tcx (V, DepNodeIndex)>>>,
+}
+
+impl<'tcx, K: Idx, V> Default for VecArenaCache<'tcx, K, V> {
+    fn default() -> Self {
+        VecArenaCache {
+            arena: WorkerLocal::new(|_| TypedArena::default()),
+            cache: Default::default(),
+        }
+    }
+}
+
+impl<'tcx, K: Eq + Idx, V: Debug + 'tcx> QueryStorage for VecArenaCache<'tcx, K, V> {
+    type Value = V;
+    type Stored = &'tcx V;
+
+    #[inline]
+    fn store_nocache(&self, value: Self::Value) -> Self::Stored {
+        let value = self.arena.alloc((value, DepNodeIndex::INVALID));
+        let value = unsafe { &*(&value.0 as *const _) };
+        &value
+    }
+}
+
+impl<'tcx, K, V: 'tcx> QueryCache for VecArenaCache<'tcx, K, V>
+where
+    K: Eq + Idx + Clone + Debug,
+    V: Debug,
+{
+    type Key = K;
+
+    #[inline(always)]
+    fn lookup<R, OnHit>(&self, key: &K, on_hit: OnHit) -> Result<R, ()>
+    where
+        OnHit: FnOnce(&&'tcx V, DepNodeIndex) -> R,
+    {
+        #[cfg(parallel_compiler)]
+        let lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
+        #[cfg(not(parallel_compiler))]
+        let lock = self.cache.lock();
+        if let Some(Some(value)) = lock.get(*key) {
+            let hit_result = on_hit(&&value.0, value.1);
+            Ok(hit_result)
+        } else {
+            Err(())
+        }
+    }
+
+    #[inline]
+    fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
+        let value = self.arena.alloc((value, index));
+        let value = unsafe { &*(value as *const _) };
+        #[cfg(parallel_compiler)]
+        let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
+        #[cfg(not(parallel_compiler))]
+        let mut lock = self.cache.lock();
+        lock.insert(key, value);
+        &value.0
+    }
+
+    fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
+        #[cfg(parallel_compiler)]
+        {
+            let shards = self.cache.lock_shards();
+            for shard in shards.iter() {
+                for (k, v) in shard.iter_enumerated() {
+                    if let Some(v) = v {
+                        f(&k, &v.0, v.1);
+                    }
+                }
+            }
+        }
+        #[cfg(not(parallel_compiler))]
+        {
+            let map = self.cache.lock();
+            for (k, v) in map.iter_enumerated() {
+                if let Some(v) = v {
+                    f(&k, &v.0, v.1);
+                }
+            }
+        }
+    }
+}
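
For context on how the reshaped selector is meant to be used: each key type now supplies both storage flavours through a single `CacheSelector<'tcx, V>` impl, with `type Cache` only available when the value is `Clone` (results are handed back by value) and `type ArenaCache` always available (results are arena-allocated and handed back by reference). Below is a minimal, self-contained sketch of that selection pattern using toy placeholder types; `Selector`, `ByValueCache`, `ByRefCache`, and `DefaultSelector` are illustrative names only, not the compiler's real `QueryStorage`/`QueryCache` machinery.

use std::marker::PhantomData;

// Toy stand-ins for the two storage shapes; names here are illustrative only.
struct ByValueCache<K, V>(PhantomData<(K, V)>);
struct ByRefCache<'a, K, V>(PhantomData<(&'a (), K, V)>);

trait Selector<'a, V> {
    // Only usable when the value can be cloned out of the cache.
    type Cache
    where
        V: Clone;
    // Always available: values live in an arena and are handed out as `&'a V`.
    type ArenaCache;
}

struct DefaultSelector<K>(PhantomData<K>);

impl<'a, K, V: 'a> Selector<'a, V> for DefaultSelector<K> {
    type Cache = ByValueCache<K, V>
    where
        V: Clone;
    type ArenaCache = ByRefCache<'a, K, V>;
}

// A user of the selector picks one of the two associated types per query.
type CloneStorage<'a> = <DefaultSelector<u32> as Selector<'a, String>>::Cache;
type ArenaStorage<'a> = <DefaultSelector<u32> as Selector<'a, String>>::ArenaCache;

fn main() {
    // Nothing to run; this only demonstrates that both projections resolve.
    let _witness: PhantomData<(CloneStorage<'static>, ArenaStorage<'static>)> = PhantomData;
}

Placing the `where V: Clone` bound on the associated type rather than on the selector impl is what lets one selector serve values that are not `Clone`: such values simply cannot name `Cache` and must go through the arena-backed variant.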