@@ -89,6 +89,7 @@ use rustc_index::{IndexSlice, IndexVec};
 use rustc_middle::middle::region;
 use rustc_middle::mir::*;
 use rustc_middle::thir::{ExprId, LintLevel};
+use rustc_middle::ty::{self, TyCtxt};
 use rustc_middle::{bug, span_bug};
 use rustc_session::lint::Level;
 use rustc_span::source_map::Spanned;
@@ -880,22 +881,45 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         block.unit()
     }

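+    /// Returns whether dropping `local` requires the asynchronous drop path:
+    /// its type is an async-drop or coroutine type, or otherwise needs async
+    /// drop glue (`needs_async_drop`).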
+    fn is_async_drop_impl(
+        tcx: TyCtxt<'tcx>,
+        local_decls: &IndexVec<Local, LocalDecl<'tcx>>,
+        param_env: ty::ParamEnv<'tcx>,
+        local: Local,
+    ) -> bool {
+        let ty = local_decls[local].ty;
+        if ty.is_async_drop(tcx, param_env) || ty.is_coroutine() {
+            return true;
+        }
+        ty.needs_async_drop(tcx, param_env)
+    }
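+    /// Method form of `is_async_drop_impl`, taking the arguments from `self`.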
+    fn is_async_drop(&self, local: Local) -> bool {
+        Self::is_async_drop_impl(self.tcx, &self.local_decls, self.param_env, local)
+    }
+
     fn leave_top_scope(&mut self, block: BasicBlock) -> BasicBlock {
         // If we are emitting a `drop` statement, we need to have the cached
         // diverge cleanup pads ready in case that drop panics.
         let needs_cleanup = self.scopes.scopes.last().is_some_and(|scope| scope.needs_cleanup());
         let is_coroutine = self.coroutine.is_some();
         let unwind_to = if needs_cleanup { self.diverge_cleanup() } else { DropIdx::MAX };

+        let scope = self.scopes.scopes.last().expect("leave_top_scope called with no scopes");
+        let has_async_drops = is_coroutine
+            && scope.drops.iter().any(|v| v.kind == DropKind::Value && self.is_async_drop(v.local));
+        let dropline_to = if has_async_drops { Some(self.diverge_dropline()) } else { None };
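+        // Re-borrow the innermost scope: `diverge_dropline` above required
+        // `&mut self`, which ended the earlier `scope` borrow.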
         let scope = self.scopes.scopes.last().expect("leave_top_scope called with no scopes");
         build_scope_drops(
             &mut self.cfg,
             &mut self.scopes.unwind_drops,
+            &mut self.scopes.coroutine_drops,
             scope,
             block,
             unwind_to,
+            dropline_to,
             is_coroutine && needs_cleanup,
             self.arg_count,
+            |v: Local| Self::is_async_drop_impl(self.tcx, &self.local_decls, self.param_env, v),
         )
         .into_block()
     }
@@ -1312,22 +1336,22 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         self.scopes.unwind_drops.add_entry_point(start, next_drop);
     }

-    /// Sets up a path that performs all required cleanup for dropping a
-    /// coroutine, starting from the given block that ends in
-    /// [TerminatorKind::Yield].
-    ///
-    /// This path terminates in CoroutineDrop.
-    pub(crate) fn coroutine_drop_cleanup(&mut self, yield_block: BasicBlock) {
+    /// Returns the [DropIdx] for the innermost drop on the dropline (coroutine drop path).
+    /// The `DropIdx` will be created if it doesn't already exist.
+    fn diverge_dropline(&mut self) -> DropIdx {
+        // It is okay to use a dummy span here, because getting the scope index
+        // on the topmost scope must always succeed.
+        self.diverge_dropline_target(self.scopes.topmost(), DUMMY_SP)
+    }
+
+    /// Similar to `diverge_cleanup_target`, but for the dropline (coroutine drop path).
+    fn diverge_dropline_target(&mut self, target_scope: region::Scope, span: Span) -> DropIdx {
         debug_assert!(
-            matches!(
-                self.cfg.block_data(yield_block).terminator().kind,
-                TerminatorKind::Yield { .. }
-            ),
-            "coroutine_drop_cleanup called on block with non-yield terminator."
+            self.coroutine.is_some(),
+            "diverge_dropline_target is valid only for coroutine"
         );
-        let (uncached_scope, mut cached_drop) = self
-            .scopes
-            .scopes
+        let target = self.scopes.scope_index(target_scope, span);
+        let (uncached_scope, mut cached_drop) = self.scopes.scopes[..=target]
             .iter()
             .enumerate()
             .rev()
@@ -1336,13 +1360,34 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             })
             .unwrap_or((0, ROOT_NODE));

-        for scope in &mut self.scopes.scopes[uncached_scope..] {
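+        // If every scope up to `target` is already cached, reuse the cached subtree.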
+        if uncached_scope > target {
+            return cached_drop;
+        }
+
+        for scope in &mut self.scopes.scopes[uncached_scope..=target] {
             for drop in &scope.drops {
                 cached_drop = self.scopes.coroutine_drops.add_drop(*drop, cached_drop);
             }
             scope.cached_coroutine_drop_block = Some(cached_drop);
         }

+        cached_drop
+    }
+
+    /// Sets up a path that performs all required cleanup for dropping a
+    /// coroutine, starting from the given block that ends in
+    /// [TerminatorKind::Yield].
+    ///
+    /// This path terminates in CoroutineDrop.
+    pub(crate) fn coroutine_drop_cleanup(&mut self, yield_block: BasicBlock) {
+        debug_assert!(
+            matches!(
+                self.cfg.block_data(yield_block).terminator().kind,
+                TerminatorKind::Yield { .. }
+            ),
+            "coroutine_drop_cleanup called on block with non-yield terminator."
+        );
+        let cached_drop = self.diverge_dropline();
         self.scopes.coroutine_drops.add_entry_point(yield_block, cached_drop);
     }

@@ -1436,18 +1481,26 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
 /// * `unwind_to`, describes the drops that would occur at this point in the code if a
 ///   panic occurred (a subset of the drops in `scope`, since we sometimes elide StorageDead and other
 ///   instructions on unwinding)
+/// * `dropline_to`, describes the drops that would occur at this point in the code if a
+///   coroutine drop occurred.
 /// * `storage_dead_on_unwind`, if true, then we should emit `StorageDead` even when unwinding
 /// * `arg_count`, number of MIR local variables corresponding to fn arguments (used to assert that we don't drop those)
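+/// * `is_async_drop`, predicate for whether a local's drop must also be scheduled
+///   on the coroutine drop path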
-fn build_scope_drops<'tcx>(
+fn build_scope_drops<'tcx, F>(
     cfg: &mut CFG<'tcx>,
     unwind_drops: &mut DropTree,
+    coroutine_drops: &mut DropTree,
     scope: &Scope,
     block: BasicBlock,
     unwind_to: DropIdx,
+    dropline_to: Option<DropIdx>,
     storage_dead_on_unwind: bool,
     arg_count: usize,
-) -> BlockAnd<()> {
-    debug!("build_scope_drops({:?} -> {:?})", block, scope);
+    is_async_drop: F,
+) -> BlockAnd<()>
+where
+    F: Fn(Local) -> bool,
+{
+    debug!("build_scope_drops({:?} -> {:?}), dropline_to={:?}", block, scope, dropline_to);

     // Build up the drops in evaluation order. The end result will
     // look like:
@@ -1480,6 +1533,9 @@ fn build_scope_drops<'tcx>(
     //  will branch to `drops[n]`.
     let mut block = block;

+    // `dropline_to` indicates what needs to be dropped should a coroutine drop occur.
+    let mut dropline_to = dropline_to;
+
     for drop_data in scope.drops.iter().rev() {
         let source_info = drop_data.source_info;
         let local = drop_data.local;
@@ -1496,6 +1552,12 @@ fn build_scope_drops<'tcx>(
                 debug_assert_eq!(unwind_drops.drops[unwind_to].data.kind, drop_data.kind);
                 unwind_to = unwind_drops.drops[unwind_to].next;

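+                // Advance the dropline cursor in lockstep with `unwind_to`: the
+                // coroutine drop tree records the same locals in the same order.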
+                if let Some(idx) = dropline_to {
+                    debug_assert_eq!(coroutine_drops.drops[idx].data.local, drop_data.local);
+                    debug_assert_eq!(coroutine_drops.drops[idx].data.kind, drop_data.kind);
+                    dropline_to = Some(coroutine_drops.drops[idx].next);
+                }
+
                 // If the operand has been moved, and we are not on an unwind
                 // path, then don't generate the drop. (We only take this into
                 // account for non-unwind paths so as not to disturb the
@@ -1505,6 +1567,12 @@ fn build_scope_drops<'tcx>(
                 }

                 unwind_drops.add_entry_point(block, unwind_to);
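+                // On the coroutine drop path, only async drops need an entry point here.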
+                if let Some(to) = dropline_to
+                    && is_async_drop(local)
+                {
+                    coroutine_drops.add_entry_point(block, to);
+                }
+
                 let next = cfg.start_new_block();
                 cfg.terminate(
                     block,
@@ -1562,6 +1630,11 @@ fn build_scope_drops<'tcx>(
                     debug_assert_eq!(unwind_drops.drops[unwind_to].data.kind, drop_data.kind);
                     unwind_to = unwind_drops.drops[unwind_to].next;
                 }
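+                // Keep the dropline cursor in sync for storage-only drops as well.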
+                if let Some(idx) = dropline_to {
+                    debug_assert_eq!(coroutine_drops.drops[idx].data.local, drop_data.local);
+                    debug_assert_eq!(coroutine_drops.drops[idx].data.kind, drop_data.kind);
+                    dropline_to = Some(coroutine_drops.drops[idx].next);
+                }
                 // Only temps and vars need their storage dead.
                 assert!(local.index() > arg_count);
                 cfg.push(block, Statement { source_info, kind: StatementKind::StorageDead(local) });
@@ -1620,6 +1693,39 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
                 }
             }
         }
+        // Link the exit drop tree into the dropline drop tree (coroutine drop path) for async drops.
+        if is_coroutine
+            && drops.drops.iter().any(|DropNode { data, next: _ }| {
+                data.kind == DropKind::Value && self.is_async_drop(data.local)
+            })
+        {
+            let dropline_target = self.diverge_dropline_target(else_scope, span);
+            let mut dropline_indices = IndexVec::from_elem_n(dropline_target, 1);
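+            // `dropline_indices` mirrors each exit-tree node in the coroutine
+            // drop tree; index 0 corresponds to `ROOT_NODE` / the dropline target.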
+            for (drop_idx, drop_data) in drops.drops.iter_enumerated().skip(1) {
+                match drop_data.data.kind {
+                    DropKind::Storage => {
+                        let coroutine_drop = self
+                            .scopes
+                            .coroutine_drops
+                            .add_drop(drop_data.data, dropline_indices[drop_data.next]);
+                        dropline_indices.push(coroutine_drop);
+                    }
+                    DropKind::Value => {
+                        let coroutine_drop = self
+                            .scopes
+                            .coroutine_drops
+                            .add_drop(drop_data.data, dropline_indices[drop_data.next]);
+                        if self.is_async_drop(drop_data.data.local) {
+                            self.scopes.coroutine_drops.add_entry_point(
+                                blocks[drop_idx].unwrap(),
+                                dropline_indices[drop_data.next],
+                            );
+                        }
+                        dropline_indices.push(coroutine_drop);
+                    }
+                }
+            }
+        }
         blocks[ROOT_NODE].map(BasicBlock::unit)
     }

@@ -1665,9 +1771,11 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
         // to be captured by the coroutine. I'm not sure how important this
         // optimization is, but it is here.
         for (drop_idx, drop_node) in drops.drops.iter_enumerated() {
-            if let DropKind::Value = drop_node.data.kind {
+            if let DropKind::Value = drop_node.data.kind
+                && let Some(bb) = blocks[drop_idx]
+            {
                 debug_assert!(drop_node.next < drops.drops.next_index());
-                drops.entry_points.push((drop_node.next, blocks[drop_idx].unwrap()));
+                drops.entry_points.push((drop_node.next, bb));
             }
         }
         Self::build_unwind_tree(cfg, drops, fn_span, resume_block);
@@ -1721,6 +1829,8 @@ impl<'tcx> DropTreeBuilder<'tcx> for CoroutineDrop {
         let term = cfg.block_data_mut(from).terminator_mut();
         if let TerminatorKind::Yield { ref mut drop, .. } = term.kind {
             *drop = Some(to);
+        } else if let TerminatorKind::Drop { ref mut drop, .. } = term.kind {
+            *drop = Some(to);
         } else {
             span_bug!(
                 term.source_info.span,