@@ -14,15 +14,15 @@ use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
 use crate::HandleCycleError;
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::sharded::Sharded;
+use rustc_data_structures::sharded::{self, Sharded};
 use rustc_data_structures::stack::ensure_sufficient_stack;
 use rustc_data_structures::sync::Lock;
 #[cfg(parallel_compiler)]
 use rustc_data_structures::{cold_path, sync};
 use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
 use rustc_span::{Span, DUMMY_SP};
 use std::cell::Cell;
-use std::collections::hash_map::Entry;
+use std::collections::hash_map::RawEntryMut;
 use std::fmt::Debug;
 use std::hash::Hash;
 use std::mem;
@@ -142,7 +142,7 @@ where
 {
     /// Completes the query by updating the query cache with the `result`,
     /// signals the waiter and forgets the JobOwner, so it won't poison the query
-    fn complete<C>(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex)
+    fn complete<C>(self, cache: &C, key_hash: u64, result: C::Value, dep_node_index: DepNodeIndex)
     where
         C: QueryCache<Key = K>,
     {
@@ -154,13 +154,17 @@ where
 
         // Mark as complete before we remove the job from the active state
         // so no other thread can re-execute this query.
-        cache.complete(key, result, dep_node_index);
+        cache.complete(key, key_hash, result, dep_node_index);
 
         let job = {
-            let mut lock = state.active.lock_shard_by_value(&key);
-            match lock.remove(&key).unwrap() {
-                QueryResult::Started(job) => job,
-                QueryResult::Poisoned => panic!(),
+            let mut lock = state.active.lock_shard_by_hash(key_hash);
+
+            match lock.raw_entry_mut().from_key_hashed_nocheck(key_hash, &key) {
+                RawEntryMut::Vacant(_) => panic!(),
+                RawEntryMut::Occupied(occupied) => match occupied.remove() {
+                    QueryResult::Started(job) => job,
+                    QueryResult::Poisoned => panic!(),
+                },
             }
         };
 
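The hunk above is the heart of the change: `complete` now receives the precomputed `key_hash`, locks the shard by that hash, and removes the finished job through the raw entry API, so the key is hashed once per query execution instead of once per map operation. Below is a minimal sketch of that removal pattern, assuming a nightly toolchain where the unstable `hash_raw_entry` feature is still available, with a plain `HashMap` in place of the sharded `active` map; `remove_prehashed` is an illustrative helper, not compiler code.

```rust
// Sketch only: remove an entry using a hash computed earlier by the caller,
// so the key is never rehashed while exclusive access is held.
#![feature(hash_raw_entry)]

use std::collections::hash_map::{HashMap, RawEntryMut};
use std::hash::{BuildHasher, Hash};

fn remove_prehashed<K: Hash + Eq, V, S: BuildHasher>(
    map: &mut HashMap<K, V, S>,
    key_hash: u64,
    key: &K,
) -> V {
    match map.raw_entry_mut().from_key_hashed_nocheck(key_hash, key) {
        // The entry was inserted before the job started running, so a missing
        // entry means the state map is corrupted.
        RawEntryMut::Vacant(_) => panic!("finished query job not found"),
        RawEntryMut::Occupied(occupied) => occupied.remove(),
    }
}

fn main() {
    let mut map: HashMap<&str, i32> = HashMap::new();
    map.insert("typeck", 1);
    // Hash once with the map's own hasher, then reuse that hash.
    let hash = map.hasher().hash_one("typeck");
    assert_eq!(remove_prehashed(&mut map, hash, &"typeck"), 1);
}
```

The `_nocheck` methods trust the caller that the hash really was produced by the map's hasher for this exact key; that holds in the patch because the same hash just selected the shard.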
@@ -209,7 +213,8 @@ where
     C: QueryCache,
     Tcx: DepContext,
 {
-    match cache.lookup(&key) {
+    let key_hash = sharded::make_hash(key);
+    match cache.lookup(&key, key_hash) {
         Some((value, index)) => {
             tcx.profiler().query_cache_hit(index.into());
             tcx.dep_graph().read_index(index);
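The probe above now hashes the key once with `sharded::make_hash` and hands the hash to `lookup`. As a rough stand-in for that helper (the real one lives in `rustc_data_structures::sharded`), it amounts to a single pass through the compiler's Fx hasher:

```rust
// Stand-in sketch for `sharded::make_hash`: hash the key once, keep the u64,
// and reuse it for shard selection and every subsequent map probe.
use std::hash::{Hash, Hasher};

use rustc_hash::FxHasher; // crates.io FxHasher, standing in for rustc's internal copy

fn make_hash<K: Hash + ?Sized>(key: &K) -> u64 {
    let mut hasher = FxHasher::default();
    key.hash(&mut hasher);
    hasher.finish()
}

fn main() {
    // Same key, same hash: the value computed before taking any lock can be
    // trusted by every lookup that follows.
    assert_eq!(make_hash(&("typeck", 42u32)), make_hash(&("typeck", 42u32)));
}
```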
@@ -246,6 +251,7 @@ fn wait_for_query<Q, Qcx>(
     qcx: Qcx,
     span: Span,
     key: Q::Key,
+    key_hash: u64,
     latch: QueryLatch,
     current: Option<QueryJobId>,
 ) -> (Q::Value, Option<DepNodeIndex>)
@@ -264,7 +270,7 @@ where
 
     match result {
         Ok(()) => {
-            let Some((v, index)) = query.query_cache(qcx).lookup(&key) else {
+            let Some((v, index)) = query.query_cache(qcx).lookup(&key, key_hash) else {
                 cold_path(|| {
                     // We didn't find the query result in the query cache. Check if it was
                     // poisoned due to a panic instead.
@@ -301,7 +307,8 @@ where
     Qcx: QueryContext,
 {
     let state = query.query_state(qcx);
-    let mut state_lock = state.active.lock_shard_by_value(&key);
+    let key_hash = sharded::make_hash(&key);
+    let mut state_lock = state.active.lock_shard_by_hash(key_hash);
 
     // For the parallel compiler we need to check both the query cache and query state structures
     // while holding the state lock to ensure that 1) the query has not yet completed and 2) the
@@ -310,28 +317,28 @@ where
     // executing, but another thread may have already completed the query and stores its result
     // in the query cache.
     if cfg!(parallel_compiler) && qcx.dep_context().sess().threads() > 1 {
-        if let Some((value, index)) = query.query_cache(qcx).lookup(&key) {
+        if let Some((value, index)) = query.query_cache(qcx).lookup(&key, key_hash) {
             qcx.dep_context().profiler().query_cache_hit(index.into());
             return (value, Some(index));
         }
     }
 
     let current_job_id = qcx.current_query_job();
 
-    match state_lock.entry(key) {
-        Entry::Vacant(entry) => {
+    match state_lock.raw_entry_mut().from_key_hashed_nocheck(key_hash, &key) {
+        RawEntryMut::Vacant(entry) => {
             // Nothing has computed or is computing the query, so we start a new job and insert it in the
             // state map.
             let id = qcx.next_job_id();
             let job = QueryJob::new(id, span, current_job_id);
-            entry.insert(QueryResult::Started(job));
+            entry.insert_hashed_nocheck(key_hash, key, QueryResult::Started(job));
 
             // Drop the lock before we start executing the query
             drop(state_lock);
 
-            execute_job::<_, _, INCR>(query, qcx, state, key, id, dep_node)
+            execute_job::<_, _, INCR>(query, qcx, state, key, key_hash, id, dep_node)
         }
-        Entry::Occupied(mut entry) => {
+        RawEntryMut::Occupied(mut entry) => {
             match entry.get_mut() {
                 QueryResult::Started(job) => {
                     #[cfg(parallel_compiler)]
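In the vacant arm above, `insert_hashed_nocheck` records the `Started` job using the same `key_hash` that chose the shard and drove the probe, so starting a query costs one key hash in total. A compressed sketch of that lookup-or-start pattern, under the same nightly `hash_raw_entry` assumption as the earlier sketch; `QueryResult` and `start_if_absent` here are illustrative stand-ins, not the compiler's types:

```rust
// Sketch only: probe with a precomputed hash and, if nothing is there, insert
// a "started" marker without rehashing the key.
#![feature(hash_raw_entry)]

use std::collections::hash_map::{HashMap, RawEntryMut};
use std::hash::{BuildHasher, Hash};

#[allow(dead_code)]
enum QueryResult {
    Started(u64), // job id
    Poisoned,
}

fn start_if_absent<K: Hash + Eq + Clone, S: BuildHasher>(
    active: &mut HashMap<K, QueryResult, S>,
    key_hash: u64,
    key: &K,
    job_id: u64,
) -> bool {
    match active.raw_entry_mut().from_key_hashed_nocheck(key_hash, key) {
        // Nothing has computed or is computing the query: record that we started it.
        RawEntryMut::Vacant(entry) => {
            entry.insert_hashed_nocheck(key_hash, key.clone(), QueryResult::Started(job_id));
            true
        }
        // Someone else got there first; the caller would wait on that job instead.
        RawEntryMut::Occupied(_) => false,
    }
}

fn main() {
    let mut active: HashMap<&str, QueryResult> = HashMap::new();
    let hash = active.hasher().hash_one("layout_of");
    assert!(start_if_absent(&mut active, hash, &"layout_of", 1));
    assert!(!start_if_absent(&mut active, hash, &"layout_of", 2));
}
```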
@@ -342,7 +349,15 @@ where
 
                         // Only call `wait_for_query` if we're using a Rayon thread pool
                         // as it will attempt to mark the worker thread as blocked.
-                        return wait_for_query(query, qcx, span, key, latch, current_job_id);
+                        return wait_for_query(
+                            query,
+                            qcx,
+                            span,
+                            key,
+                            key_hash,
+                            latch,
+                            current_job_id,
+                        );
                     }
 
                     let id = job.id;
@@ -364,6 +379,7 @@ fn execute_job<Q, Qcx, const INCR: bool>(
     qcx: Qcx,
     state: &QueryState<Q::Key>,
     key: Q::Key,
+    key_hash: u64,
     id: QueryJobId,
     dep_node: Option<DepNode>,
 ) -> (Q::Value, Option<DepNodeIndex>)
@@ -395,7 +411,7 @@ where
         // This can't happen, as query feeding adds the very dependencies to the fed query
         // as its feeding query had. So if the fed query is red, so is its feeder, which will
         // get evaluated first, and re-feed the query.
-        if let Some((cached_result, _)) = cache.lookup(&key) {
+        if let Some((cached_result, _)) = cache.lookup(&key, key_hash) {
             let Some(hasher) = query.hash_result() else {
                 panic!(
                     "no_hash fed query later has its value computed.\n\
@@ -427,7 +443,7 @@ where
             }
         }
     }
-    job_owner.complete(cache, result, dep_node_index);
+    job_owner.complete(cache, key_hash, result, dep_node_index);
 
     (result, Some(dep_node_index))
 }
@@ -826,7 +842,7 @@ where
 {
     // We may be concurrently trying both execute and force a query.
     // Ensure that only one of them runs the query.
-    if let Some((_, index)) = query.query_cache(qcx).lookup(&key) {
+    if let Some((_, index)) = query.query_cache(qcx).lookup(&key, sharded::make_hash(&key)) {
         qcx.dep_context().profiler().query_cache_hit(index.into());
         return;
     }
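Taken together, every `lock_shard_by_value(&key)` on this path becomes `lock_shard_by_hash(key_hash)` and every cache or state-map probe reuses the same `u64`, so a query hashes its key once rather than separately for the shard choice, the state map, and the cache. The toy below shows only the shape of shard-by-hash locking; the real `Sharded` in `rustc_data_structures` chooses its shard bits differently and collapses to a single lock when the parallel compiler is disabled:

```rust
// Toy sharded lock: a precomputed hash both selects the shard and can be
// reused for lookups inside it.
use std::collections::HashMap;
use std::sync::{Mutex, MutexGuard};

const SHARDS: usize = 32;

struct Sharded<T> {
    shards: Vec<Mutex<T>>,
}

impl<T: Default> Sharded<T> {
    fn new() -> Self {
        Sharded { shards: (0..SHARDS).map(|_| Mutex::new(T::default())).collect() }
    }

    fn lock_shard_by_hash(&self, hash: u64) -> MutexGuard<'_, T> {
        // Use the top bits of the hash so keys differing only in low bits
        // still spread across shards.
        let index = (hash >> (64 - SHARDS.trailing_zeros())) as usize;
        self.shards[index].lock().unwrap()
    }
}

fn main() {
    let sharded: Sharded<HashMap<u32, u32>> = Sharded::new();
    let hash = 0xdead_beef_dead_beef_u64;
    sharded.lock_shard_by_hash(hash).insert(1, 2);
    assert_eq!(sharded.lock_shard_by_hash(hash).get(&1), Some(&2));
}
```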