@@ -2686,6 +2686,142 @@ mod sealed {
     impl_sl! { vslw u32 }
 
     impl_vec_shift! { [VectorSl vec_sl] (vslb, vslh, vslw) }
+
+    #[unstable(feature = "stdarch_powerpc", issue = "111145")]
+    pub trait VectorSld {
+        unsafe fn vec_sld<const UIMM4: i32>(self, b: Self) -> Self;
+        unsafe fn vec_sldw<const UIMM2: i32>(self, b: Self) -> Self;
+    }
+
+    #[inline]
+    #[target_feature(enable = "altivec")]
+    #[cfg_attr(test, assert_instr(vsldoi, UIMM4 = 1))]
+    unsafe fn vsldoi<const UIMM4: i32>(
+        a: vector_unsigned_char,
+        b: vector_unsigned_char,
+    ) -> vector_unsigned_char {
+        static_assert_uimm_bits!(UIMM4, 4);
+        let d = UIMM4 as u8;
+        if cfg!(target_endian = "little") {
+            let perm = u8x16::new(
+                16 - d,
+                17 - d,
+                18 - d,
+                19 - d,
+                20 - d,
+                21 - d,
+                22 - d,
+                23 - d,
+                24 - d,
+                25 - d,
+                26 - d,
+                27 - d,
+                28 - d,
+                29 - d,
+                30 - d,
+                31 - d,
+            );
+
+            vec_perm(b, a, transmute(perm))
+        } else {
+            let perm = u8x16::new(
+                d,
+                d + 1,
+                d + 2,
+                d + 3,
+                d + 4,
+                d + 5,
+                d + 6,
+                d + 7,
+                d + 8,
+                d + 9,
+                d + 10,
+                d + 11,
+                d + 12,
+                d + 13,
+                d + 14,
+                d + 15,
+            );
+            vec_perm(a, b, transmute(perm))
+        }
+    }
+
+    // TODO: collapse the two once generic_const_exprs are usable.
+    #[inline]
+    #[target_feature(enable = "altivec")]
+    #[cfg_attr(test, assert_instr(xxsldwi, UIMM2 = 1))]
+    unsafe fn xxsldwi<const UIMM2: i32>(
+        a: vector_unsigned_char,
+        b: vector_unsigned_char,
+    ) -> vector_unsigned_char {
+        static_assert_uimm_bits!(UIMM2, 2);
+        let d = (UIMM2 << 2) as u8;
+        if cfg!(target_endian = "little") {
+            let perm = u8x16::new(
+                16 - d,
+                17 - d,
+                18 - d,
+                19 - d,
+                20 - d,
+                21 - d,
+                22 - d,
+                23 - d,
+                24 - d,
+                25 - d,
+                26 - d,
+                27 - d,
+                28 - d,
+                29 - d,
+                30 - d,
+                31 - d,
+            );
+
+            vec_perm(b, a, transmute(perm))
+        } else {
+            let perm = u8x16::new(
+                d,
+                d + 1,
+                d + 2,
+                d + 3,
+                d + 4,
+                d + 5,
+                d + 6,
+                d + 7,
+                d + 8,
+                d + 9,
+                d + 10,
+                d + 11,
+                d + 12,
+                d + 13,
+                d + 14,
+                d + 15,
+            );
+            vec_perm(a, b, transmute(perm))
+        }
+    }
+
+    macro_rules! impl_vec_sld {
+        ($($ty:ident),+) => { $(
+            #[unstable(feature = "stdarch_powerpc", issue = "111145")]
+            impl VectorSld for $ty {
+                #[inline]
+                #[target_feature(enable = "altivec")]
+                unsafe fn vec_sld<const UIMM4: i32>(self, b: Self) -> Self {
+                    transmute(vsldoi::<UIMM4>(transmute(self), transmute(b)))
+                }
+                #[inline]
+                #[target_feature(enable = "altivec")]
+                unsafe fn vec_sldw<const UIMM2: i32>(self, b: Self) -> Self {
+                    transmute(xxsldwi::<UIMM2>(transmute(self), transmute(b)))
+                }
+            }
+        )+ };
+    }
+
+    impl_vec_sld! { vector_bool_char, vector_signed_char, vector_unsigned_char }
+    impl_vec_sld! { vector_bool_short, vector_signed_short, vector_unsigned_short }
+    impl_vec_sld! { vector_bool_int, vector_signed_int, vector_unsigned_int }
+    impl_vec_sld! { vector_float }
 }
 
 /// Vector Merge Low
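
The permute masks built above encode the whole endian story. As a rough illustration (my own sketch, not part of the diff), here is what vsldoi computes for UIMM4 = 4: on big endian the mask selects bytes 4..=19 of the concatenation a||b, while on little endian the operands are swapped and the indices start at 16 - d.

```rust
// Sketch of the mask arithmetic in vsldoi for UIMM4 = 4 (illustration only).
fn sld_masks_for_4() -> ([u8; 16], [u8; 16]) {
    let d = 4u8;
    // Big endian: bytes d, d + 1, ..., d + 15 of a||b, i.e. 4..=19 --
    // drop the first four bytes of `a` and pull in the first four of `b`.
    let be_mask = core::array::from_fn(|i| d + i as u8);
    // Little endian: the operands are swapped (vec_perm(b, a, ...)) and the
    // indices run from 16 - d, i.e. 12..=27, so that together with
    // vec_perm's own endian handling the result matches the big-endian
    // definition of the shift.
    let le_mask = core::array::from_fn(|i| (16 - d) + i as u8);
    (be_mask, le_mask)
}
```
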
@@ -2775,6 +2911,49 @@ where
 {
     a.vec_sl(b)
 }
+
+/// Vector Shift Left Double
+///
+/// ## Endian considerations
+///
+/// This intrinsic is not endian-neutral, so uses of vec_sld in
+/// big-endian code must be rewritten for little-endian targets.
+///
+/// Historically, vec_sld could be used to shift by amounts not a multiple of the element size
+/// for most types, in which case the purpose of the shift is difficult to determine and difficult
+/// to automatically rewrite efficiently for little endian.
+///
+/// So the concatenation of a and b is done in big-endian fashion (left to right), and the shift is
+/// always to the left. This will generally produce surprising results for little-endian targets.
+#[inline]
+#[target_feature(enable = "altivec")]
+#[unstable(feature = "stdarch_powerpc", issue = "111145")]
+pub unsafe fn vec_sld<T, const UIMM4: i32>(a: T, b: T) -> T
+where
+    T: sealed::VectorSld,
+{
+    a.vec_sld::<UIMM4>(b)
+}
+
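
A minimal usage sketch for the new public entry point (not part of the diff). It assumes a nightly toolchain with `#![feature(stdarch_powerpc)]`, a PowerPC target compiled with altivec enabled (e.g. `-C target-feature=+altivec`), and that vec_sld is re-exported from core::arch::powerpc64 like the rest of the altivec API; the `lanes` helper and `demo` function are made up for illustration.

```rust
#![feature(stdarch_powerpc)]
use core::arch::powerpc64::{vec_sld, vector_unsigned_int};
use core::mem::transmute;

// Hypothetical helper: reinterpret four u32s as a vector_unsigned_int.
// The array-to-lane mapping itself depends on the target's endianness.
unsafe fn lanes(x: [u32; 4]) -> vector_unsigned_int {
    transmute(x)
}

unsafe fn demo() -> [u32; 4] {
    let a = lanes([0, 1, 2, 3]);
    let b = lanes([4, 5, 6, 7]);
    // Shift the 32-byte, left-to-right concatenation of `a` and `b` left by
    // UIMM4 = 4 bytes and keep the leftmost 16 bytes.
    let r = vec_sld::<_, 4>(a, b);
    // On a big-endian target this reads back as [1, 2, 3, 4]; on a
    // little-endian target the observed lanes differ, which is exactly what
    // the "Endian considerations" section warns about.
    transmute(r)
}
```
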
+/// Vector Shift Left Double by Words
+///
+/// ## Endian considerations
+///
+/// This intrinsic is not endian-neutral, so uses of vec_sldw in
+/// big-endian code must be rewritten for little-endian targets.
+///
+/// The concatenation of a and b is done in big-endian fashion (left to right), and the shift is
+/// always to the left. This will generally produce surprising results for little-endian targets.
+#[inline]
+#[target_feature(enable = "altivec")]
+#[unstable(feature = "stdarch_powerpc", issue = "111145")]
+pub unsafe fn vec_sldw<T, const UIMM2: i32>(a: T, b: T) -> T
+where
+    T: sealed::VectorSld,
+{
+    a.vec_sldw::<UIMM2>(b)
+}
+
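
Continuing the same hypothetical setup as the vec_sld sketch above: vec_sldw expresses the shift in 4-byte words rather than bytes (the internal helper computes d = UIMM2 << 2), so shifting by one word is equivalent to the vec_sld call shown earlier.

```rust
// Sketch only: same assumptions as the vec_sld example above.
unsafe fn demo_words(a: vector_unsigned_int, b: vector_unsigned_int) -> vector_unsigned_int {
    // UIMM2 = 1 word == 4 bytes, so this matches vec_sld::<_, 4>(a, b).
    vec_sldw::<_, 1>(a, b)
}
```
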
 /// Vector Load Indexed.
 #[inline]
 #[target_feature(enable = "altivec")]