@@ -188,6 +188,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         }
     }
 
+    /// Transmutes the `src` value to the destination type by writing it to `dst`.
+    ///
+    /// See also [`Self::codegen_transmute_operand`] for cases that can be done
+    /// without needing a pre-allocated place for the destination.
     fn codegen_transmute(
         &mut self,
         bx: &mut Bx,
@@ -198,31 +202,36 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         assert!(src.layout.is_sized());
         assert!(dst.layout.is_sized());
 
-        if src.layout.size == dst.layout.size {
+        if src.layout.size != dst.layout.size
+            || src.layout.is_uninhabited()
+            || dst.layout.is_uninhabited()
+        {
+            // These cases are all UB to actually hit, so don't emit code for them.
+            // (The size mismatches are reachable via `transmute_unchecked`.)
+            // We can't use unreachable because that's a terminator, and we
+            // need something that can be in the middle of a basic block.
+            bx.assume(bx.cx().const_bool(false))
+        } else {
             // Since in this path we have a place anyway, we can store or copy to it,
             // making sure we use the destination place's alignment even if the
             // source would normally have a higher one.
             src.val.store(bx, dst.val.with_type(src.layout));
-        } else if src.layout.is_uninhabited() {
-            bx.unreachable()
-        } else {
-            // Since this is known statically and the input could have existed
-            // without already having hit UB, might as well trap for it, even
-            // though it's UB so we *could* also unreachable it.
-            bx.abort();
         }
     }
 
-    /// Attempts to transmute an `OperandValue` to another `OperandValue`.
+    /// Transmutes an `OperandValue` to another `OperandValue`.
     ///
-    /// Returns `None` for cases that can't work in that framework, such as for
-    /// `Immediate`->`Ref` that needs an `alloca` to get the location.
+    /// This is supported only for cases where [`Self::rvalue_creates_operand`]
+    /// returns `true`, and will ICE otherwise. (In particular, anything that
+    /// would need to `alloca` in order to return a `PlaceValue` will ICE,
+    /// expecting those to go via [`Self::codegen_transmute`] instead, where
+    /// the destination place is already allocated.)
     pub(crate) fn codegen_transmute_operand(
         &mut self,
         bx: &mut Bx,
         operand: OperandRef<'tcx, Bx::Value>,
         cast: TyAndLayout<'tcx>,
-    ) -> Option<OperandValue<Bx::Value>> {
+    ) -> OperandValue<Bx::Value> {
         // Check for transmutes that are always UB.
         if operand.layout.size != cast.size
             || operand.layout.is_uninhabited()
@@ -236,17 +245,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
             // Because this transmute is UB, return something easy to generate,
             // since it's fine that later uses of the value are probably UB.
-            return Some(OperandValue::poison(bx, cast));
+            return OperandValue::poison(bx, cast);
         }
 
-        Some(match (operand.val, operand.layout.backend_repr, cast.backend_repr) {
+        match (operand.val, operand.layout.backend_repr, cast.backend_repr) {
             _ if cast.is_zst() => OperandValue::ZeroSized,
-            (OperandValue::ZeroSized, _, _) => bug!(),
-            (
-                OperandValue::Ref(source_place_val),
-                abi::BackendRepr::Memory { .. },
-                abi::BackendRepr::Scalar(_) | abi::BackendRepr::ScalarPair(_, _),
-            ) => {
+            (_, _, abi::BackendRepr::Memory { .. }) => {
+                bug!("Cannot `codegen_transmute_operand` to non-ZST memory-ABI output {cast:?}");
+            }
+            (OperandValue::Ref(source_place_val), abi::BackendRepr::Memory { .. }, _) => {
                 assert_eq!(source_place_val.llextra, None);
                 // The existing alignment is part of `source_place_val`,
                 // so that alignment will be used, not `cast`'s.
@@ -265,8 +272,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 transmute_scalar(bx, imm_a, in_a, out_a),
                 transmute_scalar(bx, imm_b, in_b, out_b),
             ),
-            _ => return None,
-        })
+            _ => bug!("Cannot `codegen_transmute_operand` {operand:?} to {cast:?}"),
+        }
     }
 
     /// Cast one of the immediates from an [`OperandValue::Immediate`]
@@ -458,9 +465,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 })
             }
             mir::CastKind::Transmute => {
-                self.codegen_transmute_operand(bx, operand, cast).unwrap_or_else(|| {
-                    bug!("Unsupported transmute-as-operand of {operand:?} to {cast:?}");
-                })
+                self.codegen_transmute_operand(bx, operand, cast)
             }
         };
         OperandRef { val, layout: cast }
@@ -942,15 +947,29 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         OperandValue::Pair(val, of)
     }
 
+    /// Returns `true` if the `rvalue` can be computed into an [`OperandRef`],
+    /// rather than needing a full `PlaceRef` for the assignment destination.
+    ///
+    /// This is used by the [`super::analyze`] code to decide which MIR locals
+    /// can stay as SSA values (as opposed to generating `alloca` slots for them).
+    /// As such, some paths here return `true` even when the specific rvalue
+    /// will not actually take the operand path, because the result type always
+    /// gets an `alloca` and it is not worth re-checking the layout in this code
+    /// when the right thing will happen anyway.
     pub(crate) fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
         match *rvalue {
             mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, cast_ty) => {
                 let operand_ty = operand.ty(self.mir, self.cx.tcx());
                 let cast_layout = self.cx.layout_of(self.monomorphize(cast_ty));
                 let operand_layout = self.cx.layout_of(self.monomorphize(operand_ty));
                 match (operand_layout.backend_repr, cast_layout.backend_repr) {
-                    // If the input is in a place we can load immediates from there.
-                    (abi::BackendRepr::Memory { .. }, abi::BackendRepr::Scalar(_) | abi::BackendRepr::ScalarPair(_, _)) => true,
+                    // When the output will be in memory anyway, just use its place
+                    // (instead of the operand path) unless it's the trivial ZST case.
+                    (_, abi::BackendRepr::Memory { .. }) => cast_layout.is_zst(),
+
+                    // Otherwise (for a non-memory output), if the input is in
+                    // memory then we can just read the value from the place.
+                    (abi::BackendRepr::Memory { .. }, _) => true,
 
                     // When we have scalar immediates, we can only convert things
                     // where the sizes match, to avoid endianness questions.
@@ -959,18 +978,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     (abi::BackendRepr::ScalarPair(a0, a1), abi::BackendRepr::ScalarPair(b0, b1)) =>
                         a0.size(self.cx) == b0.size(self.cx) && a1.size(self.cx) == b1.size(self.cx),
 
-                    // SIMD vectors don't work like normal immediates,
-                    // so always send them through memory.
-                    (abi::BackendRepr::SimdVector { .. }, _) | (_, abi::BackendRepr::SimdVector { .. }) => false,
-
-                    // When the output will be in memory anyway, just use its place
-                    // (instead of the operand path) unless it's the trivial ZST case.
-                    (_, abi::BackendRepr::Memory { .. }) => cast_layout.is_zst(),
-
                     // Mixing Scalars and ScalarPairs can get quite complicated when
                     // padding and undef get involved, so leave that to the memory path.
                     (abi::BackendRepr::Scalar(_), abi::BackendRepr::ScalarPair(_, _)) |
                     (abi::BackendRepr::ScalarPair(_, _), abi::BackendRepr::Scalar(_)) => false,
+
+                    // SIMD vectors aren't worth the trouble of dealing with complex
+                    // cases like from vectors of f32 to vectors of pointers or
+                    // from fat pointers to vectors of u16. (See #143194 #110021 ...)
+                    (abi::BackendRepr::SimdVector { .. }, _) | (_, abi::BackendRepr::SimdVector { .. }) => false,
                 }
             }
             mir::Rvalue::Ref(..) |
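
For orientation, the sketch below shows ordinary source-level transmutes that exercise the two paths touched above. It is illustrative only, not part of the patch, and the comments assume the usual layout behaviour (same-size scalar transmutes have `Scalar` backend repr; an array destination has memory ABI).

```rust
use std::mem::transmute;

// Same-size Scalar -> Scalar: `rvalue_creates_operand` returns `true`, so this
// stays on the operand path and is lowered via `transmute_scalar`.
fn bits(x: f32) -> u32 {
    unsafe { transmute::<f32, u32>(x) }
}

// An array destination has memory ABI, so the assignment goes through
// `codegen_transmute`, which stores into the already-allocated destination place.
fn to_bytes(x: u64) -> [u8; 8] {
    unsafe { transmute::<u64, [u8; 8]>(x) }
}

fn main() {
    assert_eq!(bits(1.0), 0x3f80_0000);
    assert_eq!(to_bytes(42), 42u64.to_ne_bytes());
}
```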