// reference: https://github.com/MabezDev/llvm-project/blob/xtensa_release_9.0.1_with_rust_patches-31-05-2020-cherry-pick/clang/lib/CodeGen/TargetInfo.cpp#L9668-L9767

use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
use crate::abi::{Abi, HasDataLayout, LayoutOf, Size, TyAndLayout, TyAndLayoutMethods};
use crate::spec::HasTargetSpec;

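// Up to six GPRs (a2..a7 from the callee's point of view) are available for
// argument passing on Xtensa; aggregates of up to four 32-bit words may be
// passed in registers, and return values may use at most two.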
const NUM_ARG_GPRS: u64 = 6;
const MAX_ARG_IN_REGS_SIZE: u64 = 4 * 32;
const MAX_RET_IN_REGS_SIZE: u64 = 2 * 32;

fn classify_ret_ty<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
    if arg.is_ignore() {
        return;
    }

    // The rules for return and argument types are the same,
    // so defer to `classify_arg_ty`.
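    // A return value occupies at most two GPRs (`MAX_RET_IN_REGS_SIZE` is
    // 2 * 32 bits), so start the register tracker at two rather than
    // `NUM_ARG_GPRS`.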
    let mut arg_gprs_left = 2;
    let fixed = true;
    classify_arg_ty(arg, xlen, fixed, &mut arg_gprs_left);
}

fn classify_arg_ty<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64, fixed: bool, arg_gprs_left: &mut u64) {
    assert!(*arg_gprs_left <= NUM_ARG_GPRS, "Arg GPR tracking underflow");

    // Ignore empty structs/unions.
    if arg.layout.is_zst() {
        return;
    }

    let size = arg.layout.size.bits();
    let needed_align = arg.layout.align.abi.bits();
    let mut must_use_stack = false;

    // Determine the number of GPRs needed to pass the current argument
    // according to the ABI. 2 * XLen-aligned varargs are passed in "aligned"
    // register pairs, so may consume 3 registers.
    let mut needed_arg_gprs = 1u64;

    if !fixed && needed_align == 2 * xlen {
        needed_arg_gprs = 2 + (*arg_gprs_left % 2);
    } else if size > xlen && size <= MAX_ARG_IN_REGS_SIZE {
        needed_arg_gprs = (size + xlen - 1) / xlen;
    }

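    // Not enough free registers: the argument will be passed (at least
    // partially) on the stack, and all remaining GPRs are consumed.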
    if needed_arg_gprs > *arg_gprs_left {
        must_use_stack = true;
        needed_arg_gprs = *arg_gprs_left;
    }
    *arg_gprs_left -= needed_arg_gprs;

    if !arg.layout.is_aggregate() && !matches!(arg.layout.abi, Abi::Vector { .. }) {
        // All integral types are promoted to `xlen`
        // width, unless passed on the stack.
        if size < xlen && !must_use_stack {
            arg.extend_integer_width_to(xlen);
            return;
        }

        return;
    }

    // Aggregates which are <= 4 * 32 bits will be passed in
    // registers if possible, so coerce to integers.
    if size <= MAX_ARG_IN_REGS_SIZE {
        let alignment = arg.layout.align.abi.bits();

        // Use a single `xlen` int if possible, 2 * `xlen` if 2 * `xlen` alignment
        // is required, and an `xlen` array of up to four elements otherwise.
        if size <= xlen {
            arg.cast_to(Reg::i32());
            return;
        } else if alignment == 2 * xlen {
            arg.cast_to(Reg::i64());
            return;
        } else {
            let total = Size::from_bits(((size + xlen - 1) / xlen) * xlen);
            arg.cast_to(Uniform { unit: Reg::i32(), total });
            return;
        }
    }

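    // Aggregates larger than `MAX_ARG_IN_REGS_SIZE` are passed indirectly,
    // as a pointer to a caller-made copy.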
    arg.make_indirect();
}

pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where
    Ty: TyAndLayoutMethods<'a, C> + Copy,
    C: LayoutOf<Ty = Ty, TyAndLayout = TyAndLayout<'a, Ty>> + HasDataLayout + HasTargetSpec,
{
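    // `xlen` is the integer register width, taken here from the pointer
    // size (32 bits on Xtensa).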
    let xlen = cx.data_layout().pointer_size.bits();

    if !fn_abi.ret.is_ignore() {
        classify_ret_ty(&mut fn_abi.ret, xlen);
    }

    let is_ret_indirect =
        fn_abi.ret.is_indirect() || fn_abi.ret.layout.size.bits() > MAX_RET_IN_REGS_SIZE;

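    // An indirect return value (or one too large for the return registers)
    // claims one argument GPR for the pointer to the return slot.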
    let mut arg_gprs_left = if is_ret_indirect { NUM_ARG_GPRS - 1 } else { NUM_ARG_GPRS };

    for arg in &mut fn_abi.args {
        if arg.is_ignore() {
            continue;
        }
        let fixed = true;
        classify_arg_ty(arg, xlen, fixed, &mut arg_gprs_left);
    }
}