@@ -12,26 +12,27 @@ use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobI
 use crate::query::SerializedDepNodeIndex;
 use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
 use crate::HandleCycleError;
+use hashbrown::hash_map::RawEntryMut;
 use rustc_data_structures::fingerprint::Fingerprint;
-use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::sharded::Sharded;
+use rustc_data_structures::sharded::{self, Sharded};
 use rustc_data_structures::stack::ensure_sufficient_stack;
 use rustc_data_structures::sync::Lock;
 #[cfg(parallel_compiler)]
 use rustc_data_structures::{cold_path, sync};
 use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
+use rustc_hash::FxHasher;
 use rustc_span::{Span, DUMMY_SP};
 use std::cell::Cell;
-use std::collections::hash_map::Entry;
 use std::fmt::Debug;
+use std::hash::BuildHasherDefault;
 use std::hash::Hash;
 use std::mem;
 use thin_vec::ThinVec;
 
 use super::QueryConfig;
 
 pub struct QueryState<K> {
-    active: Sharded<FxHashMap<K, QueryResult>>,
+    active: Sharded<hashbrown::HashMap<K, QueryResult, BuildHasherDefault<FxHasher>>>,
 }
 
 /// Indicates the state of a query for a given key in a query map.
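Illustration (not part of the diff): the hunk above swaps the per-shard FxHashMap for a hashbrown::HashMap keyed through BuildHasherDefault<FxHasher>, because hashbrown's raw entry API lets a caller hash a key once and reuse that hash for every map operation. A minimal standalone sketch of that pattern, assuming hashbrown 0.14 (where raw_entry_mut still exists; it was removed in 0.15) and the rustc-hash crate; the map, key, and values here are made up for illustration and are not the rustc code:

// Hash-once sketch: compute the FxHasher hash of a key a single time and
// reuse it for insertion and removal via hashbrown's raw entry API.
use std::hash::{BuildHasher, BuildHasherDefault};

use hashbrown::hash_map::RawEntryMut;
use rustc_hash::FxHasher;

type FxHashMap<K, V> = hashbrown::HashMap<K, V, BuildHasherDefault<FxHasher>>;

fn main() {
    let mut active: FxHashMap<u32, &'static str> = FxHashMap::default();

    // Hash the key once with the same hasher the map uses...
    let key = 42u32;
    let key_hash = active.hasher().hash_one(&key);

    // ...then insert without letting the map re-hash it. The *_nocheck methods
    // trust the caller that `key_hash` really is the hash of `key`.
    match active.raw_entry_mut().from_key_hashed_nocheck(key_hash, &key) {
        RawEntryMut::Vacant(entry) => {
            entry.insert_hashed_nocheck(key_hash, key, "started");
        }
        RawEntryMut::Occupied(_) => unreachable!("map was empty"),
    }

    // The same precomputed hash can later drive the removal, as `complete`
    // does in a later hunk of this diff.
    match active.raw_entry_mut().from_key_hashed_nocheck(key_hash, &key) {
        RawEntryMut::Occupied(occupied) => assert_eq!(occupied.remove(), "started"),
        RawEntryMut::Vacant(_) => panic!("entry must exist"),
    }
}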
@@ -142,7 +143,7 @@ where
 {
     /// Completes the query by updating the query cache with the `result`,
     /// signals the waiter and forgets the JobOwner, so it won't poison the query
-    fn complete<C>(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex)
+    fn complete<C>(self, cache: &C, key_hash: u64, result: C::Value, dep_node_index: DepNodeIndex)
     where
         C: QueryCache<Key = K>,
     {
@@ -154,13 +155,17 @@ where
 
         // Mark as complete before we remove the job from the active state
         // so no other thread can re-execute this query.
-        cache.complete(key, result, dep_node_index);
+        cache.complete(key, key_hash, result, dep_node_index);
 
         let job = {
-            let mut lock = state.active.lock_shard_by_value(&key);
-            match lock.remove(&key).unwrap() {
-                QueryResult::Started(job) => job,
-                QueryResult::Poisoned => panic!(),
+            let mut lock = state.active.lock_shard_by_hash(key_hash);
+
+            match lock.raw_entry_mut().from_key_hashed_nocheck(key_hash, &key) {
+                RawEntryMut::Vacant(_) => panic!(),
+                RawEntryMut::Occupied(occupied) => match occupied.remove() {
+                    QueryResult::Started(job) => job,
+                    QueryResult::Poisoned => panic!(),
+                },
             }
         };
 
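Illustration (not part of the diff): `complete` now receives the precomputed `key_hash` and reuses it twice, once to pick the shard (`lock_shard_by_hash`) and once to find the entry (`from_key_hashed_nocheck`), where the old code hashed the key separately for `lock_shard_by_value` and `remove`. Below is a toy sketch of shard selection from a precomputed hash; the real `rustc_data_structures::sharded::Sharded` differs in its lock type, shard count, and single-threaded fast path:

// Toy shard-by-hash sketch, only to show why a precomputed u64 hash is enough
// to choose a shard without touching the key again.
use std::sync::{Mutex, MutexGuard};

const SHARD_BITS: u32 = 5;
const SHARDS: usize = 1 << SHARD_BITS; // 32 shards

struct Sharded<T> {
    shards: Vec<Mutex<T>>,
}

impl<T: Default> Sharded<T> {
    fn new() -> Self {
        Sharded { shards: (0..SHARDS).map(|_| Mutex::new(T::default())).collect() }
    }

    // Pick the shard from the *top* bits of the hash, so the choice stays mostly
    // independent of the low bits the hash map itself uses for bucketing.
    fn lock_shard_by_hash(&self, hash: u64) -> MutexGuard<'_, T> {
        let index = (hash >> (64 - SHARD_BITS)) as usize;
        self.shards[index].lock().unwrap()
    }
}

fn main() {
    let sharded: Sharded<Vec<u32>> = Sharded::new();
    sharded.lock_shard_by_hash(0xdead_beef_dead_beef).push(1);
}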
@@ -209,7 +214,8 @@ where
     C: QueryCache,
     Tcx: DepContext,
 {
-    match cache.lookup(&key) {
+    let key_hash = sharded::make_hash(key);
+    match cache.lookup(&key, key_hash) {
         Some((value, index)) => {
             tcx.profiler().query_cache_hit(index.into());
             tcx.dep_graph().read_index(index);
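Illustration (not part of the diff): `try_get_cached` now hashes the key up front with `sharded::make_hash` and hands both the key and the hash to `cache.lookup`, so the cache never hashes the key again. A sketch of the conventional shape of such a helper plus a hash-taking lookup, assuming hashbrown 0.14 and rustc-hash; the `lookup` function and its signature here are made up for illustration and are not the rustc QueryCache API:

// FxHasher-based make_hash sketch and a read-only lookup that reuses the hash
// through hashbrown's immutable raw-entry builder.
use std::hash::{BuildHasherDefault, Hash, Hasher};

use rustc_hash::FxHasher;

type FxHashMap<K, V> = hashbrown::HashMap<K, V, BuildHasherDefault<FxHasher>>;

fn make_hash<K: Hash + ?Sized>(key: &K) -> u64 {
    let mut hasher = FxHasher::default();
    key.hash(&mut hasher);
    hasher.finish()
}

// Stand-in for a hash-taking cache lookup: the caller supplies the hash it
// already computed, so the map does not re-hash the key.
fn lookup<'a>(cache: &'a FxHashMap<u32, String>, key: &u32, key_hash: u64) -> Option<&'a String> {
    cache.raw_entry().from_key_hashed_nocheck(key_hash, key).map(|(_, value)| value)
}

fn main() {
    let mut cache = FxHashMap::default();
    cache.insert(7u32, "seven".to_string());

    let key = 7u32;
    let key_hash = make_hash(&key);
    assert_eq!(lookup(&cache, &key, key_hash).map(String::as_str), Some("seven"));
}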
@@ -246,6 +252,7 @@ fn wait_for_query<Q, Qcx>(
     qcx: Qcx,
     span: Span,
     key: Q::Key,
+    key_hash: u64,
     latch: QueryLatch,
     current: Option<QueryJobId>,
 ) -> (Q::Value, Option<DepNodeIndex>)
@@ -264,7 +271,7 @@ where
 
     match result {
         Ok(()) => {
-            let Some((v, index)) = query.query_cache(qcx).lookup(&key) else {
+            let Some((v, index)) = query.query_cache(qcx).lookup(&key, key_hash) else {
                 cold_path(|| {
                     // We didn't find the query result in the query cache. Check if it was
                     // poisoned due to a panic instead.
@@ -301,7 +308,8 @@ where
     Qcx: QueryContext,
 {
     let state = query.query_state(qcx);
-    let mut state_lock = state.active.lock_shard_by_value(&key);
+    let key_hash = sharded::make_hash(&key);
+    let mut state_lock = state.active.lock_shard_by_hash(key_hash);
 
     // For the parallel compiler we need to check both the query cache and query state structures
     // while holding the state lock to ensure that 1) the query has not yet completed and 2) the
@@ -310,28 +318,28 @@ where
     // executing, but another thread may have already completed the query and stores it result
     // in the query cache.
     if cfg!(parallel_compiler) && qcx.dep_context().sess().threads() > 1 {
-        if let Some((value, index)) = query.query_cache(qcx).lookup(&key) {
+        if let Some((value, index)) = query.query_cache(qcx).lookup(&key, key_hash) {
             qcx.dep_context().profiler().query_cache_hit(index.into());
             return (value, Some(index));
         }
     }
 
     let current_job_id = qcx.current_query_job();
 
-    match state_lock.entry(key) {
-        Entry::Vacant(entry) => {
+    match state_lock.raw_entry_mut().from_key_hashed_nocheck(key_hash, &key) {
+        RawEntryMut::Vacant(entry) => {
             // Nothing has computed or is computing the query, so we start a new job and insert it in the
             // state map.
             let id = qcx.next_job_id();
             let job = QueryJob::new(id, span, current_job_id);
-            entry.insert(QueryResult::Started(job));
+            entry.insert_hashed_nocheck(key_hash, key, QueryResult::Started(job));
 
             // Drop the lock before we start executing the query
             drop(state_lock);
 
-            execute_job::<_, _, INCR>(query, qcx, state, key, id, dep_node)
+            execute_job::<_, _, INCR>(query, qcx, state, key, key_hash, id, dep_node)
         }
-        Entry::Occupied(mut entry) => {
+        RawEntryMut::Occupied(mut entry) => {
             match entry.get_mut() {
                 QueryResult::Started(job) => {
                     #[cfg(parallel_compiler)]
@@ -342,7 +350,15 @@ where
 
                         // Only call `wait_for_query` if we're using a Rayon thread pool
                         // as it will attempt to mark the worker thread as blocked.
-                        return wait_for_query(query, qcx, span, key, latch, current_job_id);
+                        return wait_for_query(
+                            query,
+                            qcx,
+                            span,
+                            key,
+                            key_hash,
+                            latch,
+                            current_job_id,
+                        );
                     }
 
                     let id = job.id;
@@ -364,6 +380,7 @@ fn execute_job<Q, Qcx, const INCR: bool>(
     qcx: Qcx,
     state: &QueryState<Q::Key>,
     key: Q::Key,
+    key_hash: u64,
     id: QueryJobId,
     dep_node: Option<DepNode>,
 ) -> (Q::Value, Option<DepNodeIndex>)
@@ -395,7 +412,7 @@ where
         // This can't happen, as query feeding adds the very dependencies to the fed query
         // as its feeding query had. So if the fed query is red, so is its feeder, which will
         // get evaluated first, and re-feed the query.
-        if let Some((cached_result, _)) = cache.lookup(&key) {
+        if let Some((cached_result, _)) = cache.lookup(&key, key_hash) {
             let Some(hasher) = query.hash_result() else {
                 panic!(
                     "no_hash fed query later has its value computed.\n\
@@ -427,7 +444,7 @@ where
             }
         }
     }
-    job_owner.complete(cache, result, dep_node_index);
+    job_owner.complete(cache, key_hash, result, dep_node_index);
 
     (result, Some(dep_node_index))
 }
@@ -826,7 +843,7 @@ where
 {
     // We may be concurrently trying both execute and force a query.
     // Ensure that only one of them runs the query.
-    if let Some((_, index)) = query.query_cache(qcx).lookup(&key) {
+    if let Some((_, index)) = query.query_cache(qcx).lookup(&key, sharded::make_hash(&key)) {
         qcx.dep_context().profiler().query_cache_hit(index.into());
         return;
     }