@@ -14,11 +14,9 @@ int pcpu_freelist_init(struct pcpu_freelist *s)
1414 for_each_possible_cpu (cpu ) {
1515 struct pcpu_freelist_head * head = per_cpu_ptr (s -> freelist , cpu );
1616
17- raw_spin_lock_init (& head -> lock );
17+ raw_res_spin_lock_init (& head -> lock );
1818 head -> first = NULL ;
1919 }
20- raw_spin_lock_init (& s -> extralist .lock );
21- s -> extralist .first = NULL ;
2220 return 0 ;
2321}
2422
@@ -34,58 +32,39 @@ static inline void pcpu_freelist_push_node(struct pcpu_freelist_head *head,
3432 WRITE_ONCE (head -> first , node );
3533}
3634
37- static inline void ___pcpu_freelist_push (struct pcpu_freelist_head * head ,
35+ static inline bool ___pcpu_freelist_push (struct pcpu_freelist_head * head ,
3836 struct pcpu_freelist_node * node )
3937{
40- raw_spin_lock (& head -> lock );
41- pcpu_freelist_push_node (head , node );
42- raw_spin_unlock (& head -> lock );
43- }
44-
45- static inline bool pcpu_freelist_try_push_extra (struct pcpu_freelist * s ,
46- struct pcpu_freelist_node * node )
47- {
48- if (!raw_spin_trylock (& s -> extralist .lock ))
38+ if (raw_res_spin_lock (& head -> lock ))
4939 return false;
50-
51- pcpu_freelist_push_node (& s -> extralist , node );
52- raw_spin_unlock (& s -> extralist .lock );
40+ pcpu_freelist_push_node (head , node );
41+ raw_res_spin_unlock (& head -> lock );
5342 return true;
5443}
5544
56- static inline void ___pcpu_freelist_push_nmi (struct pcpu_freelist * s ,
57- struct pcpu_freelist_node * node )
45+ void __pcpu_freelist_push (struct pcpu_freelist * s ,
46+ struct pcpu_freelist_node * node )
5847{
59- int cpu , orig_cpu ;
48+ struct pcpu_freelist_head * head ;
49+ int cpu ;
6050
61- orig_cpu = raw_smp_processor_id ();
62- while (1 ) {
63- for_each_cpu_wrap (cpu , cpu_possible_mask , orig_cpu ) {
64- struct pcpu_freelist_head * head ;
51+ if (___pcpu_freelist_push (this_cpu_ptr (s -> freelist ), node ))
52+ return ;
6553
54+ while (true) {
55+ for_each_cpu_wrap (cpu , cpu_possible_mask , raw_smp_processor_id ()) {
56+ if (cpu == raw_smp_processor_id ())
57+ continue ;
6658 head = per_cpu_ptr (s -> freelist , cpu );
67- if (raw_spin_trylock (& head -> lock )) {
68- pcpu_freelist_push_node (head , node );
69- raw_spin_unlock (& head -> lock );
70- return ;
71- }
72- }
73-
74- /* cannot lock any per cpu lock, try extralist */
75- if (pcpu_freelist_try_push_extra (s , node ))
59+ if (raw_res_spin_lock (& head -> lock ))
60+ continue ;
61+ pcpu_freelist_push_node (head , node );
62+ raw_res_spin_unlock (& head -> lock );
7663 return ;
64+ }
7765 }
7866}
7967
80- void __pcpu_freelist_push (struct pcpu_freelist * s ,
81- struct pcpu_freelist_node * node )
82- {
83- if (in_nmi ())
84- ___pcpu_freelist_push_nmi (s , node );
85- else
86- ___pcpu_freelist_push (this_cpu_ptr (s -> freelist ), node );
87- }
88-
8968void pcpu_freelist_push (struct pcpu_freelist * s ,
9069 struct pcpu_freelist_node * node )
9170{
@@ -120,71 +99,29 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
12099
121100static struct pcpu_freelist_node * ___pcpu_freelist_pop (struct pcpu_freelist * s )
122101{
102+ struct pcpu_freelist_node * node = NULL ;
123103 struct pcpu_freelist_head * head ;
124- struct pcpu_freelist_node * node ;
125104 int cpu ;
126105
127106 for_each_cpu_wrap (cpu , cpu_possible_mask , raw_smp_processor_id ()) {
128107 head = per_cpu_ptr (s -> freelist , cpu );
129108 if (!READ_ONCE (head -> first ))
130109 continue ;
131- raw_spin_lock (& head -> lock );
110+ if (raw_res_spin_lock (& head -> lock ))
111+ continue ;
132112 node = head -> first ;
133113 if (node ) {
134114 WRITE_ONCE (head -> first , node -> next );
135- raw_spin_unlock (& head -> lock );
115+ raw_res_spin_unlock (& head -> lock );
136116 return node ;
137117 }
138- raw_spin_unlock (& head -> lock );
118+ raw_res_spin_unlock (& head -> lock );
139119 }
140-
141- /* per cpu lists are all empty, try extralist */
142- if (!READ_ONCE (s -> extralist .first ))
143- return NULL ;
144- raw_spin_lock (& s -> extralist .lock );
145- node = s -> extralist .first ;
146- if (node )
147- WRITE_ONCE (s -> extralist .first , node -> next );
148- raw_spin_unlock (& s -> extralist .lock );
149- return node ;
150- }
151-
152- static struct pcpu_freelist_node *
153- ___pcpu_freelist_pop_nmi (struct pcpu_freelist * s )
154- {
155- struct pcpu_freelist_head * head ;
156- struct pcpu_freelist_node * node ;
157- int cpu ;
158-
159- for_each_cpu_wrap (cpu , cpu_possible_mask , raw_smp_processor_id ()) {
160- head = per_cpu_ptr (s -> freelist , cpu );
161- if (!READ_ONCE (head -> first ))
162- continue ;
163- if (raw_spin_trylock (& head -> lock )) {
164- node = head -> first ;
165- if (node ) {
166- WRITE_ONCE (head -> first , node -> next );
167- raw_spin_unlock (& head -> lock );
168- return node ;
169- }
170- raw_spin_unlock (& head -> lock );
171- }
172- }
173-
174- /* cannot pop from per cpu lists, try extralist */
175- if (!READ_ONCE (s -> extralist .first ) || !raw_spin_trylock (& s -> extralist .lock ))
176- return NULL ;
177- node = s -> extralist .first ;
178- if (node )
179- WRITE_ONCE (s -> extralist .first , node -> next );
180- raw_spin_unlock (& s -> extralist .lock );
181120 return node ;
182121}
183122
/*
 * Pop one node from the freelist, or return NULL if none is available.
 *
 * Thin public wrapper: with the resilient spinlock there is no separate
 * NMI path any more, so this delegates straight to ___pcpu_freelist_pop().
 */
struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
{
	return ___pcpu_freelist_pop(s);
}
190127
0 commit comments