@@ -123,7 +123,7 @@ mod job;
 mod job_state;

 use std::cell::RefCell;
-use std::collections::{BTreeMap, HashMap, HashSet};
+use std::collections::{HashMap, HashSet};
 use std::fmt::Write as _;
 use std::io;
 use std::path::{Path, PathBuf};
@@ -133,7 +133,7 @@ use std::time::Duration;

 use anyhow::{format_err, Context as _};
 use cargo_util::ProcessBuilder;
-use jobserver::{Acquired, Client, HelperThread};
+use jobserver::{Acquired, HelperThread};
 use log::{debug, trace};
 use semver::Version;

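With the per-rustc token plumbing removed, the only `jobserver` items still imported are `Acquired` and `HelperThread`. A minimal, standalone sketch (not Cargo's code) of how those remaining pieces are typically wired together, assuming the public `jobserver` crate API: a helper thread forwards acquired tokens over a channel, and the receiver keeps them in a simple `Vec<Acquired>` pool, mirroring `DrainState::tokens`.

```rust
use std::sync::mpsc;

use jobserver::{Acquired, Client};

fn main() -> std::io::Result<()> {
    // Stand-in for Cargo's jobserver; a real build would usually inherit one
    // via Client::from_env() or create it once per build.
    let client = Client::new(4)?;

    let (tx, rx) = mpsc::channel::<std::io::Result<Acquired>>();

    // The helper thread blocks on acquisition and hands each result to the
    // closure, which forwards it to the main loop (compare Message::Token).
    let helper = client.into_helper_thread(move |token| {
        let _ = tx.send(token);
    })?;

    // Ask for one token and keep it in a pool.
    helper.request_token();
    let mut tokens: Vec<Acquired> = Vec::new();
    if let Ok(token) = rx.recv() {
        tokens.push(token?);
    }

    // Dropping an Acquired hands the token back to the jobserver.
    tokens.clear();
    Ok(())
}
```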
@@ -199,13 +199,6 @@ struct DrainState<'cfg> {
     /// single rustc process.
     tokens: Vec<Acquired>,

-    /// rustc per-thread tokens, when in jobserver-per-rustc mode.
-    rustc_tokens: HashMap<JobId, Vec<Acquired>>,
-
-    /// This represents the list of rustc jobs (processes) and associated
-    /// clients that are interested in receiving a token.
-    to_send_clients: BTreeMap<JobId, Vec<Client>>,
-
     /// The list of jobs that we have not yet started executing, but have
     /// retrieved from the `queue`. We eagerly pull jobs off the main queue to
     /// allow us to request jobserver tokens pretty early.
@@ -387,12 +380,6 @@ enum Message {
     Token(io::Result<Acquired>),
     Finish(JobId, Artifact, CargoResult<()>),
     FutureIncompatReport(JobId, Vec<FutureBreakageItem>),
-
-    // This client should get release_raw called on it with one of our tokens
-    NeedsToken(JobId),
-
-    // A token previously passed to a NeedsToken client is being released.
-    ReleaseToken(JobId),
 }

 impl<'cfg> JobQueue<'cfg> {
@@ -507,8 +494,6 @@ impl<'cfg> JobQueue<'cfg> {
             next_id: 0,
             timings: self.timings,
             tokens: Vec::new(),
-            rustc_tokens: HashMap::new(),
-            to_send_clients: BTreeMap::new(),
             pending_queue: Vec::new(),
             print: DiagnosticPrinter::new(cx.bcx.config),
             finished: 0,
@@ -600,46 +585,9 @@ impl<'cfg> DrainState<'cfg> {
         self.active.len() < self.tokens.len() + 1
     }

-    // The oldest job (i.e., least job ID) is the one we grant tokens to first.
-    fn pop_waiting_client(&mut self) -> (JobId, Client) {
-        // FIXME: replace this with BTreeMap::first_entry when that stabilizes.
-        let key = *self
-            .to_send_clients
-            .keys()
-            .next()
-            .expect("at least one waiter");
-        let clients = self.to_send_clients.get_mut(&key).unwrap();
-        let client = clients.pop().unwrap();
-        if clients.is_empty() {
-            self.to_send_clients.remove(&key);
-        }
-        (key, client)
-    }
-
-    // If we managed to acquire some extra tokens, send them off to a waiting rustc.
-    fn grant_rustc_token_requests(&mut self) -> CargoResult<()> {
-        while !self.to_send_clients.is_empty() && self.has_extra_tokens() {
-            let (id, client) = self.pop_waiting_client();
-            // This unwrap is guaranteed to succeed. `active` must be at least
-            // length 1, as otherwise there can't be a client waiting to be sent
-            // on, so tokens.len() must also be at least one.
-            let token = self.tokens.pop().unwrap();
-            self.rustc_tokens
-                .entry(id)
-                .or_insert_with(Vec::new)
-                .push(token);
-            client
-                .release_raw()
-                .with_context(|| "failed to release jobserver token")?;
-        }
-
-        Ok(())
-    }
-
     fn handle_event(
         &mut self,
         cx: &mut Context<'_, '_>,
-        jobserver_helper: &HelperThread,
         plan: &mut BuildPlan,
         event: Message,
     ) -> Result<(), ErrorToHandle> {
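The removed `pop_waiting_client` carried a FIXME about `BTreeMap::first_entry`; on current Rust the same "smallest job ID first" pop can be written with `BTreeMap::pop_first` (stable since 1.66). A minimal standalone sketch of that pattern, with `u32` standing in for `JobId` and `String` for `Client`:

```rust
use std::collections::BTreeMap;

/// Pop one waiter from the entry with the smallest key, keeping the entry
/// around only while it still has waiters.
fn pop_waiting_client(waiters: &mut BTreeMap<u32, Vec<String>>) -> Option<(u32, String)> {
    let (key, mut clients) = waiters.pop_first()?;
    let client = clients.pop()?;
    if !clients.is_empty() {
        // Put the remaining waiters back under the same key.
        waiters.insert(key, clients);
    }
    Some((key, client))
}

fn main() {
    let mut waiters = BTreeMap::new();
    waiters.insert(7_u32, vec!["b".to_string()]);
    waiters.insert(3, vec!["a".to_string()]);
    // The smallest job ID (3) is served first, matching the removed code's
    // "oldest job gets tokens first" policy.
    assert_eq!(pop_waiting_client(&mut waiters), Some((3, "a".to_string())));
    assert_eq!(pop_waiting_client(&mut waiters), Some((7, "b".to_string())));
    assert!(waiters.is_empty());
}
```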
@@ -699,19 +647,6 @@ impl<'cfg> DrainState<'cfg> {
                     Artifact::All => {
                         trace!("end: {:?}", id);
                         self.finished += 1;
-                        if let Some(rustc_tokens) = self.rustc_tokens.remove(&id) {
-                            // This puts back the tokens that this rustc
-                            // acquired into our primary token list.
-                            //
-                            // This represents a rustc bug: it did not
-                            // release all of its thread tokens but finished
-                            // completely. But we want to make Cargo resilient
-                            // to such rustc bugs, as they're generally not
-                            // fatal in nature (i.e., Cargo can make progress
-                            // still, and the build might not even fail).
-                            self.tokens.extend(rustc_tokens);
-                        }
-                        self.to_send_clients.remove(&id);
                         self.report_warning_count(
                             cx.bcx.config,
                             id,
@@ -756,31 +691,6 @@ impl<'cfg> DrainState<'cfg> {
                 let token = acquired_token.with_context(|| "failed to acquire jobserver token")?;
                 self.tokens.push(token);
             }
-            Message::NeedsToken(id) => {
-                trace!("queue token request");
-                jobserver_helper.request_token();
-                let client = cx.rustc_clients[&self.active[&id]].clone();
-                self.to_send_clients
-                    .entry(id)
-                    .or_insert_with(Vec::new)
-                    .push(client);
-            }
-            Message::ReleaseToken(id) => {
-                // Note that this pops off potentially a completely
-                // different token, but all tokens of the same job are
-                // conceptually the same so that's fine.
-                //
-                // self.tokens is a "pool" -- the order doesn't matter -- and
-                // this transfers ownership of the token into that pool. If we
-                // end up using it on the next go around, then this token will
-                // be truncated, same as tokens obtained through Message::Token.
-                let rustc_tokens = self
-                    .rustc_tokens
-                    .get_mut(&id)
-                    .expect("no tokens associated");
-                self.tokens
-                    .push(rustc_tokens.pop().expect("rustc releases token it has"));
-            }
         }

         Ok(())
@@ -795,19 +705,6 @@ impl<'cfg> DrainState<'cfg> {
         // listen for a message with a timeout, and on timeout we run the
         // previous parts of the loop again.
         let mut events = self.messages.try_pop_all();
-        trace!(
-            "tokens in use: {}, rustc_tokens: {:?}, waiting_rustcs: {:?} (events this tick: {})",
-            self.tokens.len(),
-            self.rustc_tokens
-                .iter()
-                .map(|(k, j)| (k, j.len()))
-                .collect::<Vec<_>>(),
-            self.to_send_clients
-                .iter()
-                .map(|(k, j)| (k, j.len()))
-                .collect::<Vec<_>>(),
-            events.len(),
-        );
         if events.is_empty() {
             loop {
                 self.tick_progress();
@@ -866,17 +763,13 @@ impl<'cfg> DrainState<'cfg> {
                 break;
             }

-            if let Err(e) = self.grant_rustc_token_requests() {
-                self.handle_error(&mut cx.bcx.config.shell(), &mut errors, e);
-            }
-
             // And finally, before we block waiting for the next event, drop any
             // excess tokens we may have accidentally acquired. Due to how our
             // jobserver interface is architected we may acquire a token that we
             // don't actually use, and if this happens just relinquish it back
             // to the jobserver itself.
             for event in self.wait_for_events() {
-                if let Err(event_err) = self.handle_event(cx, jobserver_helper, plan, event) {
+                if let Err(event_err) = self.handle_event(cx, plan, event) {
                     self.handle_error(&mut cx.bcx.config.shell(), &mut errors, event_err);
                 }
             }
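The only token bookkeeping that survives is the shared pool itself: as the comment kept in the last hunk notes, excess tokens are simply relinquished before the loop blocks again. A rough sketch of that idea (not the literal Cargo code), assuming that dropping a `jobserver::Acquired` returns its token to the jobserver:

```rust
use jobserver::Acquired;

/// Keep at most `active_jobs - 1` extra tokens in the pool (the first job
/// runs on the build's implicit token); everything beyond that is dropped,
/// which hands the surplus tokens back to the jobserver.
fn drop_excess_tokens(tokens: &mut Vec<Acquired>, active_jobs: usize) {
    let needed = active_jobs.saturating_sub(1);
    if tokens.len() > needed {
        // Truncation drops the surplus Acquired values, releasing them.
        tokens.truncate(needed);
    }
}
```

Truncating the pool is enough here because the release happens in `Acquired`'s destructor, so no explicit call back into the jobserver is needed.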