@@ -16,6 +16,7 @@
 #include "bpf_lru_list.h"
 #include "map_in_map.h"
 #include <linux/bpf_mem_alloc.h>
+#include <asm/rqspinlock.h>
 
 #define HTAB_CREATE_FLAG_MASK						\
 	(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE |	\
@@ -78,7 +79,7 @@
  */
 struct bucket {
 	struct hlist_nulls_head head;
-	raw_spinlock_t raw_lock;
+	rqspinlock_t raw_lock;
 };
 
 #define HASHTAB_MAP_LOCK_COUNT 8
@@ -104,8 +105,6 @@ struct bpf_htab {
 	u32 n_buckets;	/* number of hash buckets */
 	u32 elem_size;	/* size of each element in bytes */
 	u32 hashrnd;
-	struct lock_class_key lockdep_key;
-	int __percpu *map_locked[HASHTAB_MAP_LOCK_COUNT];
 };
 
 /* each htab element is struct htab_elem + key + value */
@@ -140,45 +139,26 @@ static void htab_init_buckets(struct bpf_htab *htab)
 
 	for (i = 0; i < htab->n_buckets; i++) {
 		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
-		raw_spin_lock_init(&htab->buckets[i].raw_lock);
-		lockdep_set_class(&htab->buckets[i].raw_lock,
-				  &htab->lockdep_key);
+		raw_res_spin_lock_init(&htab->buckets[i].raw_lock);
 		cond_resched();
 	}
 }
 
-static inline int htab_lock_bucket(const struct bpf_htab *htab,
-				   struct bucket *b, u32 hash,
-				   unsigned long *pflags)
+static inline int htab_lock_bucket(struct bucket *b, unsigned long *pflags)
 {
 	unsigned long flags;
+	int ret;
 
-	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
-
-	preempt_disable();
-	local_irq_save(flags);
-	if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
-		__this_cpu_dec(*(htab->map_locked[hash]));
-		local_irq_restore(flags);
-		preempt_enable();
-		return -EBUSY;
-	}
-
-	raw_spin_lock(&b->raw_lock);
+	ret = raw_res_spin_lock_irqsave(&b->raw_lock, flags);
+	if (ret)
+		return ret;
 	*pflags = flags;
-
 	return 0;
 }
 
-static inline void htab_unlock_bucket(const struct bpf_htab *htab,
-				      struct bucket *b, u32 hash,
-				      unsigned long flags)
+static inline void htab_unlock_bucket(struct bucket *b, unsigned long flags)
 {
-	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
-	raw_spin_unlock(&b->raw_lock);
-	__this_cpu_dec(*(htab->map_locked[hash]));
-	local_irq_restore(flags);
-	preempt_enable();
+	raw_res_spin_unlock_irqrestore(&b->raw_lock, flags);
 }
 
 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
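
With this change htab_lock_bucket() can fail: raw_res_spin_lock_irqsave() from the resilient-spinlock API (pulled in via <asm/rqspinlock.h>) returns a non-zero error instead of spinning indefinitely, and every caller in the hunks below now checks the return value. A minimal, illustrative sketch of that caller pattern follows; the helper names come from the hunk above, while the function and its body are invented for illustration and are not part of this commit:

/* Illustrative sketch only, not part of the diff. */
static long example_bucket_update(struct bucket *b)
{
	unsigned long flags;
	int ret;

	ret = htab_lock_bucket(b, &flags);
	if (ret)
		return ret;	/* lock acquisition failed; propagate the error */

	/* ... modify b->head (the bucket's hlist_nulls list) under the lock ... */

	htab_unlock_bucket(b, flags);
	return 0;
}
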
@@ -483,14 +463,12 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
 	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
 	struct bpf_htab *htab;
-	int err, i;
+	int err;
 
 	htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
 	if (!htab)
 		return ERR_PTR(-ENOMEM);
 
-	lockdep_register_key(&htab->lockdep_key);
-
 	bpf_map_init_from_attr(&htab->map, attr);
 
 	if (percpu_lru) {
@@ -536,15 +514,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	if (!htab->buckets)
 		goto free_elem_count;
 
-	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) {
-		htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map,
-							   sizeof(int),
-							   sizeof(int),
-							   GFP_USER);
-		if (!htab->map_locked[i])
-			goto free_map_locked;
-	}
-
 	if (htab->map.map_flags & BPF_F_ZERO_SEED)
 		htab->hashrnd = 0;
 	else
@@ -607,15 +576,12 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 free_map_locked:
 	if (htab->use_percpu_counter)
 		percpu_counter_destroy(&htab->pcount);
-	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
-		free_percpu(htab->map_locked[i]);
 	bpf_map_area_free(htab->buckets);
 	bpf_mem_alloc_destroy(&htab->pcpu_ma);
 	bpf_mem_alloc_destroy(&htab->ma);
 free_elem_count:
 	bpf_map_free_elem_count(&htab->map);
 free_htab:
-	lockdep_unregister_key(&htab->lockdep_key);
 	bpf_map_area_free(htab);
 	return ERR_PTR(err);
 }
@@ -820,7 +786,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
 	b = __select_bucket(htab, tgt_l->hash);
 	head = &b->head;
 
-	ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return false;
 
@@ -831,7 +797,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
 			break;
 	}
 
-	htab_unlock_bucket(htab, b, tgt_l->hash, flags);
+	htab_unlock_bucket(b, flags);
 
 	if (l == tgt_l)
 		check_and_free_fields(htab, l);
@@ -1150,7 +1116,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 	 */
 	}
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return ret;
 
@@ -1201,7 +1167,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 			check_and_free_fields(htab, l_old);
 		}
 	}
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 	if (l_old) {
 		if (old_map_ptr)
 			map->ops->map_fd_put_ptr(map, old_map_ptr, true);
@@ -1210,7 +1176,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 	}
 	return 0;
 err:
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 	return ret;
 }
 
@@ -1257,7 +1223,7 @@ static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value
 	copy_map_value(&htab->map,
 		       l_new->key + round_up(map->key_size, 8), value);
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		goto err_lock_bucket;
 
@@ -1278,7 +1244,7 @@ static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value
 	ret = 0;
 
 err:
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 
 err_lock_bucket:
 	if (ret)
@@ -1315,7 +1281,7 @@ static long __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return ret;
 
@@ -1340,7 +1306,7 @@ static long __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 	}
 	ret = 0;
 err:
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 	return ret;
 }
 
@@ -1381,7 +1347,7 @@ static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 		return -ENOMEM;
 	}
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		goto err_lock_bucket;
 
@@ -1405,7 +1371,7 @@ static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 	}
 	ret = 0;
 err:
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 err_lock_bucket:
 	if (l_new) {
 		bpf_map_dec_elem_count(&htab->map);
@@ -1447,7 +1413,7 @@ static long htab_map_delete_elem(struct bpf_map *map, void *key)
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return ret;
 
@@ -1457,7 +1423,7 @@ static long htab_map_delete_elem(struct bpf_map *map, void *key)
 	else
 		ret = -ENOENT;
 
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 
 	if (l)
 		free_htab_elem(htab, l);
@@ -1483,7 +1449,7 @@ static long htab_lru_map_delete_elem(struct bpf_map *map, void *key)
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return ret;
 
@@ -1494,7 +1460,7 @@ static long htab_lru_map_delete_elem(struct bpf_map *map, void *key)
 	else
 		ret = -ENOENT;
 
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 	if (l)
 		htab_lru_push_free(htab, l);
 	return ret;
@@ -1561,7 +1527,6 @@ static void htab_map_free_timers_and_wq(struct bpf_map *map)
 static void htab_map_free(struct bpf_map *map)
 {
 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-	int i;
 
 	/* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback.
 	 * bpf_free_used_maps() is called after bpf prog is no longer executing.
@@ -1586,9 +1551,6 @@ static void htab_map_free(struct bpf_map *map)
 	bpf_mem_alloc_destroy(&htab->ma);
 	if (htab->use_percpu_counter)
 		percpu_counter_destroy(&htab->pcount);
-	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
-		free_percpu(htab->map_locked[i]);
-	lockdep_unregister_key(&htab->lockdep_key);
 	bpf_map_area_free(htab);
 }
 
@@ -1631,7 +1593,7 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	ret = htab_lock_bucket(htab, b, hash, &bflags);
+	ret = htab_lock_bucket(b, &bflags);
 	if (ret)
 		return ret;
 
@@ -1668,7 +1630,7 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
 		hlist_nulls_del_rcu(&l->hash_node);
 
 out_unlock:
-	htab_unlock_bucket(htab, b, hash, bflags);
+	htab_unlock_bucket(b, bflags);
 
 	if (l) {
 		if (is_lru_map)
@@ -1790,7 +1752,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 		head = &b->head;
 		/* do not grab the lock unless need it (bucket_cnt > 0). */
 		if (locked) {
-			ret = htab_lock_bucket(htab, b, batch, &flags);
+			ret = htab_lock_bucket(b, &flags);
 			if (ret) {
 				rcu_read_unlock();
 				bpf_enable_instrumentation();
@@ -1813,7 +1775,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 		/* Note that since bucket_cnt > 0 here, it is implicit
 		 * that the locked was grabbed, so release it.
 		 */
-		htab_unlock_bucket(htab, b, batch, flags);
+		htab_unlock_bucket(b, flags);
 		rcu_read_unlock();
 		bpf_enable_instrumentation();
 		goto after_loop;
@@ -1824,7 +1786,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 		/* Note that since bucket_cnt > 0 here, it is implicit
 		 * that the locked was grabbed, so release it.
 		 */
-		htab_unlock_bucket(htab, b, batch, flags);
+		htab_unlock_bucket(b, flags);
 		rcu_read_unlock();
 		bpf_enable_instrumentation();
 		kvfree(keys);
@@ -1887,7 +1849,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 		dst_val += value_size;
 	}
 
-	htab_unlock_bucket(htab, b, batch, flags);
+	htab_unlock_bucket(b, flags);
 	locked = false;
 
 	while (node_to_free) {