@@ -1162,6 +1162,70 @@ mod sealed {
     impl_abss! { vec_abss_i16, i16x8 }
     impl_abss! { vec_abss_i32, i32x4 }
 
+    #[inline]
+    #[target_feature(enable = "altivec")]
+    #[cfg_attr(test, assert_instr(vspltb, IMM4 = 15))]
+    unsafe fn vspltb<const IMM4: u32>(a: vector_signed_char) -> vector_signed_char {
+        static_assert_uimm_bits!(IMM4, 4);
+        let b = u8x16::splat(IMM4 as u8);
+        vec_perm(a, a, transmute(b))
+    }
+
+    #[inline]
+    #[target_feature(enable = "altivec")]
+    #[cfg_attr(test, assert_instr(vsplth, IMM3 = 7))]
+    unsafe fn vsplth<const IMM3: u32>(a: vector_signed_short) -> vector_signed_short {
+        static_assert_uimm_bits!(IMM3, 3);
+        let b0 = IMM3 as u8 * 2;
+        let b1 = b0 + 1;
+        let b = u8x16::new(
+            b0, b1, b0, b1, b0, b1, b0, b1, b0, b1, b0, b1, b0, b1, b0, b1,
+        );
+        vec_perm(a, a, transmute(b))
+    }
+
+    #[inline]
+    #[target_feature(enable = "altivec")]
+    #[cfg_attr(all(test, not(target_feature = "vsx")), assert_instr(vspltw, IMM2 = 3))]
+    #[cfg_attr(all(test, target_feature = "vsx"), assert_instr(xxspltw, IMM2 = 3))]
+    unsafe fn vspltw<const IMM2: u32>(a: vector_signed_int) -> vector_signed_int {
+        static_assert_uimm_bits!(IMM2, 2);
+        let b0 = IMM2 as u8 * 4;
+        let b1 = b0 + 1;
+        let b2 = b0 + 2;
+        let b3 = b0 + 3;
+        let b = u8x16::new(
+            b0, b1, b2, b3, b0, b1, b2, b3, b0, b1, b2, b3, b0, b1, b2, b3,
+        );
+        vec_perm(a, a, transmute(b))
+    }
+
+    pub trait VectorSplat {
+        unsafe fn vec_splat<const IMM: u32>(self) -> Self;
+    }
+
+    macro_rules! impl_vec_splat {
+        ($ty:ty, $fun:ident) => {
+            impl VectorSplat for $ty {
+                #[inline]
+                #[target_feature(enable = "altivec")]
+                unsafe fn vec_splat<const IMM: u32>(self) -> Self {
+                    transmute($fun::<IMM>(transmute(self)))
+                }
+            }
+        };
+    }
+
+    impl_vec_splat! { vector_signed_char, vspltb }
+    impl_vec_splat! { vector_unsigned_char, vspltb }
+    impl_vec_splat! { vector_bool_char, vspltb }
+    impl_vec_splat! { vector_signed_short, vsplth }
+    impl_vec_splat! { vector_unsigned_short, vsplth }
+    impl_vec_splat! { vector_bool_short, vsplth }
+    impl_vec_splat! { vector_signed_int, vspltw }
+    impl_vec_splat! { vector_unsigned_int, vspltw }
+    impl_vec_splat! { vector_bool_int, vspltw }
+
     macro_rules! splat {
         ($name:ident, $v:ident, $r:ident [$instr:ident, $doc:literal]) => {
             #[doc = $doc]
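// Worked example (not from this patch) of the permute-index trick the helpers
// above share: for `vsplth::<3>`, b0 = 3 * 2 = 6 and b1 = 7, so the index
// vector handed to `vec_perm` is
//     [6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7]
// which copies halfword 3 of `a` (bytes 6..=7) into every halfword of the
// result. `vspltb` and `vspltw` build the same pattern with 1- and 4-byte
// element strides, so only the three element widths need hand-written helpers;
// `impl_vec_splat!` bridges the signed/unsigned/bool vector types to them via
// transmutes.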
@@ -2606,6 +2670,16 @@ where
     a.vec_abss()
 }
 
+/// Vector Splat
+#[inline]
+#[target_feature(enable = "altivec")]
+pub unsafe fn vec_splat<T, const IMM: u32>(a: T) -> T
+where
+    T: sealed::VectorSplat,
+{
+    a.vec_splat::<IMM>()
+}
+
 splat! { vec_splat_u8, u8, u8x16 [vspltisb, "Vector Splat to Unsigned Byte"] }
 splat! { vec_splat_i8, i8, i8x16 [vspltisb, "Vector Splat to Signed Byte"] }
 splat! { vec_splat_u16, u16, u16x8 [vspltish, "Vector Splat to Unsigned Halfword"] }
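// Minimal usage sketch (not from this patch): one way the new public
// `vec_splat` could be called. It assumes a PowerPC target with AltiVec
// enabled and that `vec_splat` and `vector_signed_int` are in scope as they
// are elsewhere in this module; the wrapper dispatches through the sealed
// `VectorSplat` trait to the width-specific helper.
#[target_feature(enable = "altivec")]
unsafe fn broadcast_lane_two(a: vector_signed_int) -> vector_signed_int {
    // Broadcast the element selected by the immediate (here 2) into all four
    // lanes; element numbering follows the AltiVec convention.
    vec_splat::<vector_signed_int, 2>(a)
}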