@@ -760,30 +760,18 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
760760 count : u64 ,
761761 dest : PlaceRef < ' tcx , & ' ll Value > ,
762762 ) {
763- let zero = self . const_usize ( 0 ) ;
764- let count = self . const_usize ( count) ;
765-
766- let header_bb = self . append_sibling_block ( "repeat_loop_header" ) ;
767- let body_bb = self . append_sibling_block ( "repeat_loop_body" ) ;
768- let next_bb = self . append_sibling_block ( "repeat_loop_next" ) ;
769-
770- self . br ( header_bb) ;
771-
772- let mut header_bx = Self :: build ( self . cx , header_bb) ;
773- let i = header_bx. phi ( self . val_ty ( zero) , & [ zero] , & [ self . llbb ( ) ] ) ;
774-
775- let keep_going = header_bx. icmp ( IntPredicate :: IntULT , i, count) ;
776- header_bx. cond_br ( keep_going, body_bb, next_bb) ;
777-
778- let mut body_bx = Self :: build ( self . cx , body_bb) ;
779- let dest_elem = dest. project_index ( & mut body_bx, i) ;
780- cg_elem. val . store ( & mut body_bx, dest_elem) ;
781-
782- let next = body_bx. unchecked_uadd ( i, self . const_usize ( 1 ) ) ;
783- body_bx. br ( header_bb) ;
784- header_bx. add_incoming_to_phi ( i, next, body_bb) ;
785-
786- * self = Self :: build ( self . cx , next_bb) ;
763+ if self . cx . sess ( ) . opts . optimize == OptLevel :: No {
764+ // To let debuggers single-step over lines like
765+ //
766+ // let foo = ["bar"; 42];
767+ //
768+ // we need the debugger-friendly LLVM IR that `_unoptimized()`
769+ // provides. The `_optimized()` version generates trickier LLVM IR.
770+ // See PR #148058 for a failed attempt at handling that.
771+ self . write_operand_repeatedly_unoptimized ( cg_elem, count, dest) ;
772+ } else {
773+ self . write_operand_repeatedly_optimized ( cg_elem, count, dest) ;
774+ }
787775 }
788776
789777 fn range_metadata ( & mut self , load : & ' ll Value , range : WrappingRange ) {
@@ -1514,6 +1502,78 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
15141502 self . set_metadata_node ( inst, llvm:: MD_unpredictable , & [ ] ) ;
15151503 }
15161504
/// Stores `cg_elem` into each of the `count` elements of the array at
/// `dest`, using an index-based counting loop (`i = 0; i < count; i += 1`).
///
/// This form produces LLVM IR that the optimizer handles well, but that
/// is harder for debuggers to single-step through — hence the separate
/// `_unoptimized` variant used at `OptLevel::No`.
///
/// On return, `self` has been repositioned to the block *after* the loop,
/// so subsequent codegen continues there.
fn write_operand_repeatedly_optimized(
    &mut self,
    cg_elem: OperandRef<'tcx, &'ll Value>,
    count: u64,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    let zero = self.const_usize(0);
    let count = self.const_usize(count);

    let header_bb = self.append_sibling_block("repeat_loop_header");
    let body_bb = self.append_sibling_block("repeat_loop_body");
    let next_bb = self.append_sibling_block("repeat_loop_next");

    // Branch from the current block into the loop header.
    self.br(header_bb);

    // Header: phi for the induction variable `i`, seeded with 0 from the
    // predecessor block. The back-edge incoming is added below, after the
    // body block exists.
    let mut header_bx = Self::build(self.cx, header_bb);
    let i = header_bx.phi(self.val_ty(zero), &[zero], &[self.llbb()]);

    let keep_going = header_bx.icmp(IntPredicate::IntULT, i, count);
    header_bx.cond_br(keep_going, body_bb, next_bb);

    // Body: store one copy of the element at index `i`.
    let mut body_bx = Self::build(self.cx, body_bb);
    let dest_elem = dest.project_index(&mut body_bx, i);
    cg_elem.val.store(&mut body_bx, dest_elem);

    // Unchecked add: the `i < count` guard above holds on this path.
    let next = body_bx.unchecked_uadd(i, self.const_usize(1));
    body_bx.br(header_bb);
    // Complete the phi with the back-edge value from the body block.
    header_bx.add_incoming_to_phi(i, next, body_bb);

    // Reposition `self` to the block following the loop.
    *self = Self::build(self.cx, next_bb);
}
1536+
/// Stores `cg_elem` into each of the `count` elements of the array at
/// `dest`, using a pointer-based begin/end loop (`current != end`).
///
/// This variant emits the debugger-friendly IR needed so that lines like
/// `let foo = ["bar"; 42];` can be single-stepped at `OptLevel::No`; the
/// `_optimized` sibling is preferred when optimizations are enabled.
///
/// On return, `self` has been repositioned to the block *after* the loop,
/// so subsequent codegen continues there.
fn write_operand_repeatedly_unoptimized(
    &mut self,
    cg_elem: OperandRef<'tcx, &'ll Value>,
    count: u64,
    dest: PlaceRef<'tcx, &'ll Value>,
) {
    let zero = self.const_usize(0);
    let count = self.const_usize(count);
    // Pointers to the first element and to one-past-the-last element.
    let start = dest.project_index(self, zero).val.llval;
    let end = dest.project_index(self, count).val.llval;

    let header_bb = self.append_sibling_block("repeat_loop_header");
    let body_bb = self.append_sibling_block("repeat_loop_body");
    let next_bb = self.append_sibling_block("repeat_loop_next");

    // Branch from the current block into the loop header.
    self.br(header_bb);

    // Header: phi for the cursor pointer, seeded with `start` from the
    // predecessor block; the back-edge incoming is added below.
    let mut header_bx = Self::build(self.cx, header_bb);
    let current = header_bx.phi(self.val_ty(start), &[start], &[self.llbb()]);

    let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
    header_bx.cond_br(keep_going, body_bb, next_bb);

    // Body: store one copy of the element at the cursor. The element
    // alignment is the array's alignment restricted by the element size
    // (an element at an offset may be less aligned than the array start).
    let mut body_bx = Self::build(self.cx, body_bb);
    let align = dest.val.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
    cg_elem
        .val
        .store(&mut body_bx, PlaceRef::new_sized_aligned(current, cg_elem.layout, align));

    // Advance the cursor by one element via an inbounds GEP.
    let next = body_bx.inbounds_gep(
        self.backend_type(cg_elem.layout),
        current,
        &[self.const_usize(1)],
    );
    body_bx.br(header_bb);
    // Complete the phi with the back-edge value from the body block.
    header_bx.add_incoming_to_phi(current, next, body_bb);

    // Reposition `self` to the block following the loop.
    *self = Self::build(self.cx, next_bb);
}
1576+
/// Emits a call to the `llvm.minnum` intrinsic on `lhs` and `rhs`,
/// overloaded at the operands' type. NaN handling follows the intrinsic's
/// LangRef-defined semantics (returns the other operand on a quiet NaN) —
/// see the LLVM LangRef for the exact contract.
pub(crate) fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
    self.call_intrinsic("llvm.minnum", &[self.val_ty(lhs)], &[lhs, rhs])
}
0 commit comments