@@ -131,6 +131,9 @@ pub struct Scope<'tcx> {
131131
132132 /// The cache for drop chain on "generator drop" exit.
133133 cached_generator_drop : Option < BasicBlock > ,
134+
135+ /// The cache for drop chain on "unwind" exit.
136+ cached_unwind : CachedBlock ,
134137}
135138
136139#[ derive( Debug ) ]
@@ -154,6 +157,11 @@ struct CachedBlock {
154157 unwind : Option < BasicBlock > ,
155158
156159 /// The cached block for unwinds during cleanups-on-generator-drop path
160+ ///
161+ /// This is split from the standard unwind path here to prevent drop
162+ /// elaboration from creating drop flags that would have to be captured
163+ /// by the generator. I'm not sure how important this optimization is,
164+ /// but it is here.
157165 generator_drop : Option < BasicBlock > ,
158166}
159167
@@ -217,34 +225,29 @@ impl<'tcx> Scope<'tcx> {
217225 /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
218226 /// larger extent of code.
219227 ///
220- /// `unwind` controls whether caches for the unwind branch are also invalidated.
221- fn invalidate_cache ( & mut self , unwind : bool ) {
 229+ /// `storage_only` controls whether to invalidate only drop paths that run `StorageDead`.
229+ /// `this_scope_only` controls whether to invalidate only drop paths that refer to the current
230+ /// top-of-scope (as opposed to dependent scopes).
231+ fn invalidate_cache ( & mut self , storage_only : bool , this_scope_only : bool ) {
232+ // FIXME: maybe do shared caching of `cached_exits` etc. to handle functions
233+ // with lots of `try!`?
234+
235+ // cached exits drop storage and refer to the top-of-scope
222236 self . cached_exits . clear ( ) ;
223- if !unwind { return ; }
224- for dropdata in & mut self . drops {
225- if let DropKind :: Value { ref mut cached_block } = dropdata. kind {
226- cached_block. invalidate ( ) ;
227- }
237+
238+ if !storage_only {
239+ // the current generator drop and unwind ignore
240+ // storage but refer to top-of-scope
241+ self . cached_generator_drop = None ;
242+ self . cached_unwind . invalidate ( ) ;
228243 }
229- }
230244
231- /// Returns the cached entrypoint for diverging exit from this scope.
232- ///
233- /// Precondition: the caches must be fully filled (i.e. diverge_cleanup is called) in order for
234- /// this method to work correctly.
235- fn cached_block ( & self , generator_drop : bool ) -> Option < BasicBlock > {
236- let mut drops = self . drops . iter ( ) . rev ( ) . filter_map ( |data| {
237- match data. kind {
238- DropKind :: Value { cached_block } => {
239- Some ( cached_block. get ( generator_drop) )
245+ if !storage_only && !this_scope_only {
246+ for dropdata in & mut self . drops {
247+ if let DropKind :: Value { ref mut cached_block } = dropdata. kind {
248+ cached_block. invalidate ( ) ;
240249 }
241- DropKind :: Storage => None
242250 }
243- } ) ;
244- if let Some ( cached_block) = drops. next ( ) {
245- Some ( cached_block. expect ( "drop cache is not filled" ) )
246- } else {
247- None
248251 }
249252 }
250253
@@ -356,7 +359,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
356359 needs_cleanup : false ,
357360 drops : vec ! [ ] ,
358361 cached_generator_drop : None ,
359- cached_exits : FxHashMap ( )
362+ cached_exits : FxHashMap ( ) ,
363+ cached_unwind : CachedBlock :: default ( ) ,
360364 } ) ;
361365 }
362366
@@ -482,15 +486,16 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
482486 TerminatorKind :: Goto { target : b } ) ;
483487 b
484488 } ;
489+
490+ // End all regions for scopes out of which we are breaking.
491+ self . cfg . push_end_region ( self . hir . tcx ( ) , block, src_info, scope. region_scope ) ;
492+
485493 unpack ! ( block = build_scope_drops( & mut self . cfg,
486494 scope,
487495 rest,
488496 block,
489497 self . arg_count,
490498 true ) ) ;
491-
492- // End all regions for scopes out of which we are breaking.
493- self . cfg . push_end_region ( self . hir . tcx ( ) , block, src_info, scope. region_scope ) ;
494499 }
495500
496501 self . cfg . terminate ( block, src_info, TerminatorKind :: GeneratorDrop ) ;
@@ -672,8 +677,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
672677 // invalidating caches of each scope visited. This way bare minimum of the
673678 // caches gets invalidated. i.e. if a new drop is added into the middle scope, the
 674679 // cache of outer scope stays intact.
675- let invalidate_unwind = needs_drop && !this_scope;
676- scope. invalidate_cache ( invalidate_unwind) ;
680+ scope. invalidate_cache ( !needs_drop, this_scope) ;
677681 if this_scope {
678682 if let DropKind :: Value { .. } = drop_kind {
679683 scope. needs_cleanup = true ;
@@ -819,30 +823,50 @@ fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
819823 generator_drop : bool )
820824 -> BlockAnd < ( ) > {
821825 debug ! ( "build_scope_drops({:?} -> {:?})" , block, scope) ;
822- let mut iter = scope. drops . iter ( ) . rev ( ) . peekable ( ) ;
826+ let mut iter = scope. drops . iter ( ) . rev ( ) ;
823827 while let Some ( drop_data) = iter. next ( ) {
824828 let source_info = scope. source_info ( drop_data. span ) ;
825829 match drop_data. kind {
826830 DropKind :: Value { .. } => {
827- // Try to find the next block with its cached block
828- // for us to diverge into in case the drop panics.
829- let on_diverge = iter. peek ( ) . iter ( ) . filter_map ( |dd| {
831+ // Try to find the next block with its cached block for us to
832+ // diverge into, either a previous block in this current scope or
833+ // the top of the previous scope.
834+ //
835+ // If it wasn't for EndRegion, we could just chain all the DropData
836+ // together and pick the first DropKind::Value. Please do that
837+ // when we replace EndRegion with NLL.
838+ let on_diverge = iter. clone ( ) . filter_map ( |dd| {
830839 match dd. kind {
831- DropKind :: Value { cached_block } => {
832- let result = cached_block. get ( generator_drop) ;
833- if result. is_none ( ) {
834- span_bug ! ( drop_data. span, "cached block not present?" )
835- }
836- result
837- } ,
840+ DropKind :: Value { cached_block } => Some ( cached_block) ,
838841 DropKind :: Storage => None
839842 }
840- } ) . next ( ) ;
841- // If there’s no `cached_block`s within current scope,
842- // we must look for one in the enclosing scope.
843- let on_diverge = on_diverge. or_else ( || {
844- earlier_scopes. iter ( ) . rev ( ) . flat_map ( |s| s. cached_block ( generator_drop) ) . next ( )
843+ } ) . next ( ) . or_else ( || {
844+ if earlier_scopes. iter ( ) . any ( |scope| scope. needs_cleanup ) {
845+ // If *any* scope requires cleanup code to be run,
846+ // we must use the cached unwind from the *topmost*
847+ // scope, to ensure all EndRegions from surrounding
848+ // scopes are executed before the drop code runs.
849+ Some ( earlier_scopes. last ( ) . unwrap ( ) . cached_unwind )
850+ } else {
851+ // We don't need any further cleanup, so return None
852+ // to avoid creating a landing pad. We can skip
853+ // EndRegions because all local regions end anyway
854+ // when the function unwinds.
855+ //
856+ // This is an important optimization because LLVM is
857+ // terrible at optimizing landing pads. FIXME: I think
858+ // it would be cleaner and better to do this optimization
859+ // in SimplifyCfg instead of here.
860+ None
861+ }
862+ } ) ;
863+
864+ let on_diverge = on_diverge. map ( |cached_block| {
865+ cached_block. get ( generator_drop) . unwrap_or_else ( || {
866+ span_bug ! ( drop_data. span, "cached block not present?" )
867+ } )
845868 } ) ;
869+
846870 let next = cfg. start_new_block ( ) ;
847871 cfg. terminate ( block, source_info, TerminatorKind :: Drop {
848872 location : drop_data. location . clone ( ) ,
@@ -933,14 +957,23 @@ fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
933957 } ;
934958 }
935959
936- // Finally, push the EndRegion block, used by mir-borrowck. (Block
937- // becomes trivial goto after pass that removes all EndRegions.)
938- {
939- let block = cfg. start_new_cleanup_block ( ) ;
940- cfg. push_end_region ( tcx, block, source_info ( span) , scope. region_scope ) ;
941- cfg. terminate ( block, source_info ( span) , TerminatorKind :: Goto { target : target } ) ;
942- target = block
943- }
960+ // Finally, push the EndRegion block, used by mir-borrowck, and set
961+ // `cached_unwind` to point to it (Block becomes trivial goto after
962+ // pass that removes all EndRegions).
963+ target = {
964+ let cached_block = scope. cached_unwind . ref_mut ( generator_drop) ;
965+ if let Some ( cached_block) = * cached_block {
966+ cached_block
967+ } else {
968+ let block = cfg. start_new_cleanup_block ( ) ;
969+ cfg. push_end_region ( tcx, block, source_info ( span) , scope. region_scope ) ;
970+ cfg. terminate ( block, source_info ( span) , TerminatorKind :: Goto { target : target } ) ;
971+ * cached_block = Some ( block) ;
972+ block
973+ }
974+ } ;
975+
976+ debug ! ( "build_diverge_scope({:?}, {:?}) = {:?}" , scope, span, target) ;
944977
945978 target
946979}
0 commit comments