 */

 /* number of bytes addressable by LDX/STX insn with 16-bit 'off' field */
-#define GUARD_SZ (1ull << sizeof_field(struct bpf_insn, off) * 8)
+#define GUARD_SZ round_up(1ull << sizeof_field(struct bpf_insn, off) * 8, PAGE_SIZE << 1)
 #define KERN_VM_SZ (SZ_4G + GUARD_SZ)

 struct bpf_arena {
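GUARD_SZ covers the largest offset reachable by an LDX/STX insn with a 16-bit 'off' field. With 4 KiB pages, 1 << 16 is already a whole number of page pairs, but on configurations with larger pages (e.g. 64 KiB) it no longer is, so the value is rounded up to a multiple of PAGE_SIZE << 1. A minimal userspace sketch of the new arithmetic; PAGE_SIZE, round_up() and the 2-byte 'off' width are local stand-ins for the kernel definitions, not the real ones:

/* round_up() assumes a power-of-two alignment, which PAGE_SIZE << 1
 * always is. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 65536ull	/* assumption: a 64 KiB-page configuration */
#define round_up(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uint64_t old_guard = 1ull << 16;	/* sizeof_field(struct bpf_insn, off) * 8 bits */
	uint64_t new_guard = round_up(old_guard, PAGE_SIZE << 1);

	/* with 4 KiB pages both values are 64 KiB; with 64 KiB pages the
	 * guard grows to 128 KiB so it stays a whole number of page pairs */
	printf("old=%llu new=%llu\n",
	       (unsigned long long)old_guard, (unsigned long long)new_guard);
	return 0;
}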
@@ -138,7 +138,11 @@ static struct bpf_map *arena_map_alloc(union bpf_attr *attr)
 	INIT_LIST_HEAD(&arena->vma_list);
 	bpf_map_init_from_attr(&arena->map, attr);
 	range_tree_init(&arena->rt);
-	range_tree_set(&arena->rt, 0, attr->max_entries);
+	err = range_tree_set(&arena->rt, 0, attr->max_entries);
+	if (err) {
+		bpf_map_area_free(arena);
+		goto err;
+	}
 	mutex_init(&arena->lock);

 	return &arena->map;
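range_tree_set() can fail (returning e.g. -ENOMEM), and the old code ignored that. The fix frees the just-allocated arena by hand before jumping to the shared err label, since that label only unwinds resources set up earlier in arena_map_alloc(). A self-contained sketch of that unwind shape; obj_alloc() and init_meta() are hypothetical stand-ins, not kernel functions:

#include <stdlib.h>
#include <errno.h>

/* stand-in for range_tree_set(): non-zero return means failure */
static int init_meta(const char *area) { return area[0] ? -EINVAL : 0; }

struct obj { char *area; };

static struct obj *obj_alloc(void)
{
	struct obj *o;
	char *area;
	int err;

	area = calloc(1, 4096);		/* earlier resource: released at 'err' */
	if (!area)
		return NULL;
	o = calloc(1, sizeof(*o));	/* later resource, like the arena map */
	if (!o)
		goto err;
	err = init_meta(area);		/* may fail, like range_tree_set() */
	if (err) {
		free(o);		/* the 'err' label knows nothing about o */
		goto err;
	}
	o->area = area;
	return o;
err:
	free(area);
	return NULL;
}

int main(void)
{
	struct obj *o = obj_alloc();

	if (o) {
		free(o->area);
		free(o);
	}
	return 0;
}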
@@ -218,7 +222,7 @@ static u64 arena_map_mem_usage(const struct bpf_map *map)
 struct vma_list {
 	struct vm_area_struct *vma;
 	struct list_head head;
-	atomic_t mmap_count;
+	refcount_t mmap_count;
 };

 static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
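Switching mmap_count from atomic_t to refcount_t doesn't change the happy path, but refcount_t saturates instead of wrapping and warns on suspicious transitions such as increment-from-zero, turning refcounting bugs into leaks rather than use-after-free. A single-threaded toy model of those semantics; the real refcount_t in <linux/refcount.h> is atomic and more careful than this:

#include <stdio.h>
#include <limits.h>

typedef struct { unsigned int v; } toy_refcount_t;

static void toy_refcount_set(toy_refcount_t *r, unsigned int n) { r->v = n; }

static void toy_refcount_inc(toy_refcount_t *r)
{
	if (r->v == 0 || r->v == UINT_MAX) {	/* inc-from-zero or overflow */
		fprintf(stderr, "refcount warning: bad increment\n");
		r->v = UINT_MAX;		/* saturate: object leaks but stays safe */
		return;
	}
	r->v++;
}

static int toy_refcount_dec_and_test(toy_refcount_t *r)
{
	if (r->v == UINT_MAX)			/* saturated: never free */
		return 0;
	return --r->v == 0;			/* true means last reference dropped */
}

int main(void)
{
	toy_refcount_t mmap_count;

	toy_refcount_set(&mmap_count, 1);	/* remember_vma() */
	toy_refcount_inc(&mmap_count);		/* arena_vm_open() */
	toy_refcount_dec_and_test(&mmap_count);	/* first arena_vm_close(): not last */
	if (toy_refcount_dec_and_test(&mmap_count))
		printf("last close: free vml\n");
	return 0;
}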
@@ -228,7 +232,7 @@ static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
 	vml = kmalloc(sizeof(*vml), GFP_KERNEL);
 	if (!vml)
 		return -ENOMEM;
-	atomic_set(&vml->mmap_count, 1);
+	refcount_set(&vml->mmap_count, 1);
 	vma->vm_private_data = vml;
 	vml->vma = vma;
 	list_add(&vml->head, &arena->vma_list);
@@ -239,7 +243,7 @@ static void arena_vm_open(struct vm_area_struct *vma)
 {
 	struct vma_list *vml = vma->vm_private_data;

-	atomic_inc(&vml->mmap_count);
+	refcount_inc(&vml->mmap_count);
 }

 static void arena_vm_close(struct vm_area_struct *vma)
@@ -248,7 +252,7 @@ static void arena_vm_close(struct vm_area_struct *vma)
 	struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
 	struct vma_list *vml = vma->vm_private_data;

-	if (!atomic_dec_and_test(&vml->mmap_count))
+	if (!refcount_dec_and_test(&vml->mmap_count))
 		return;
 	guard(mutex)(&arena->lock);
 	/* update link list under lock */
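guard(mutex)(&arena->lock) is the kernel's scope-based lock from <linux/cleanup.h>: the mutex is released automatically when the enclosing scope ends, which is why the list manipulation below needs no explicit unlock on any path. A userspace approximation of the same mechanism using the compiler's cleanup attribute and pthreads; guard_mutex() and its helpers are hypothetical, not the kernel's DEFINE_GUARD machinery:

#include <pthread.h>
#include <stdio.h>

static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

#define CONCAT_(a, b)	a##b
#define CONCAT(a, b)	CONCAT_(a, b)
/* lock now, auto-unlock when the hidden variable goes out of scope */
#define guard_mutex(lock) \
	pthread_mutex_t *CONCAT(guard_, __LINE__) \
		__attribute__((cleanup(unlock_cleanup))) = \
		(pthread_mutex_lock(lock), (lock))

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void update_list(void)
{
	guard_mutex(&list_lock);	/* held for the rest of this scope */
	puts("list updated under lock");
}					/* unlock_cleanup() runs here */

int main(void)
{
	update_list();
	return 0;
}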
@@ -257,8 +261,6 @@ static void arena_vm_close(struct vm_area_struct *vma)
 	kfree(vml);
 }

-#define MT_ENTRY ((void *)&arena_map_ops) /* unused. has to be valid pointer */
-
 static vm_fault_t arena_vm_fault(struct vm_fault *vmf)
 {
 	struct bpf_map *map = vmf->vma->vm_file->private_data;