@@ -175,14 +175,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             bug!("expected fn item type, found {}", callee_ty);
         };
 
-        let sig = callee_ty.fn_sig(tcx);
-        let sig = tcx.normalize_erasing_late_bound_regions(self.typing_env(), sig);
-        let arg_tys = sig.inputs();
-        let ret_ty = sig.output();
         let name = tcx.item_name(def_id);
 
-        let llret_ty = self.layout_of(ret_ty).llvm_type(self);
-
         let simple = get_simple_intrinsic(self, name);
         let llval = match name {
             _ if simple.is_some() => {
@@ -265,22 +259,22 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     BackendRepr::Scalar(scalar) => {
                         match scalar.primitive() {
                             Primitive::Int(..) => {
-                                if self.cx().size_of(ret_ty).bytes() < 4 {
+                                if self.cx().size_of(result.layout.ty).bytes() < 4 {
                                     // `va_arg` should not be called on an integer type
                                     // less than 4 bytes in length. If it is, promote
                                     // the integer to an `i32` and truncate the result
                                     // back to the smaller type.
                                     let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
-                                    self.trunc(promoted_result, llret_ty)
+                                    self.trunc(promoted_result, result.layout.llvm_type(self))
                                 } else {
-                                    emit_va_arg(self, args[0], ret_ty)
+                                    emit_va_arg(self, args[0], result.layout.ty)
                                 }
                             }
                             Primitive::Float(Float::F16) => {
                                 bug!("the va_arg intrinsic does not work with `f16`")
                             }
                             Primitive::Float(Float::F64) | Primitive::Pointer(_) => {
-                                emit_va_arg(self, args[0], ret_ty)
+                                emit_va_arg(self, args[0], result.layout.ty)
                             }
                             // `va_arg` should never be used with the return type f32.
                             Primitive::Float(Float::F32) => {
@@ -384,7 +378,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             | sym::rotate_right
             | sym::saturating_add
             | sym::saturating_sub => {
-                let ty = arg_tys[0];
+                let ty = args[0].layout.ty;
                 if !ty.is_integral() {
                     tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                         span,
@@ -403,26 +397,26 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                             &[args[0].immediate(), y],
                         );
 
-                        self.intcast(ret, llret_ty, false)
+                        self.intcast(ret, result.layout.llvm_type(self), false)
                     }
                     sym::ctlz_nonzero => {
                         let y = self.const_bool(true);
                         let llvm_name = &format!("llvm.ctlz.i{width}");
                         let ret = self.call_intrinsic(llvm_name, &[args[0].immediate(), y]);
-                        self.intcast(ret, llret_ty, false)
+                        self.intcast(ret, result.layout.llvm_type(self), false)
                     }
                     sym::cttz_nonzero => {
                         let y = self.const_bool(true);
                         let llvm_name = &format!("llvm.cttz.i{width}");
                         let ret = self.call_intrinsic(llvm_name, &[args[0].immediate(), y]);
-                        self.intcast(ret, llret_ty, false)
+                        self.intcast(ret, result.layout.llvm_type(self), false)
                     }
                     sym::ctpop => {
                         let ret = self.call_intrinsic(
                             &format!("llvm.ctpop.i{width}"),
                             &[args[0].immediate()],
                         );
-                        self.intcast(ret, llret_ty, false)
+                        self.intcast(ret, result.layout.llvm_type(self), false)
                     }
                     sym::bswap => {
                         if width == 8 {
@@ -554,16 +548,16 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             // Unpack non-power-of-2 #[repr(packed, simd)] arguments.
             // This gives them the expected layout of a regular #[repr(simd)] vector.
             let mut loaded_args = Vec::new();
-            for (ty, arg) in arg_tys.iter().zip(args) {
+            for arg in args {
                 loaded_args.push(
                     // #[repr(packed, simd)] vectors are passed like arrays (as references,
                     // with reduced alignment and no padding) rather than as immediates.
                     // We can use a vector load to fix the layout and turn the argument
                     // into an immediate.
-                    if ty.is_simd()
+                    if arg.layout.ty.is_simd()
                         && let OperandValue::Ref(place) = arg.val
                     {
-                        let (size, elem_ty) = ty.simd_size_and_type(self.tcx());
+                        let (size, elem_ty) = arg.layout.ty.simd_size_and_type(self.tcx());
                         let elem_ll_ty = match elem_ty.kind() {
                             ty::Float(f) => self.type_float_from_ty(*f),
                             ty::Int(i) => self.type_int_from_ty(*i),
@@ -580,10 +574,10 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 );
             }
 
-            let llret_ty = if ret_ty.is_simd()
-                && let BackendRepr::Memory { .. } = self.layout_of(ret_ty).layout.backend_repr
+            let llret_ty = if result.layout.ty.is_simd()
+                && let BackendRepr::Memory { .. } = result.layout.backend_repr
             {
-                let (size, elem_ty) = ret_ty.simd_size_and_type(self.tcx());
+                let (size, elem_ty) = result.layout.ty.simd_size_and_type(self.tcx());
                 let elem_ll_ty = match elem_ty.kind() {
                     ty::Float(f) => self.type_float_from_ty(*f),
                     ty::Int(i) => self.type_int_from_ty(*i),
@@ -593,7 +587,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 };
                 self.type_vector(elem_ll_ty, size)
             } else {
-                llret_ty
+                result.layout.llvm_type(self)
             };
 
             match generic_simd_intrinsic(
@@ -602,7 +596,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 callee_ty,
                 fn_args,
                 &loaded_args,
-                ret_ty,
+                result.layout.ty,
                 llret_ty,
                 span,
             ) {