@@ -953,7 +953,7 @@ static const struct vm_operations_struct bpf_map_default_vmops = {
 static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	struct bpf_map *map = filp->private_data;
-	int err;
+	int err = 0;
 
 	if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record))
 		return -ENOTSUPP;
@@ -977,24 +977,33 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
 			err = -EACCES;
 			goto out;
 		}
+		bpf_map_write_active_inc(map);
 	}
+out:
+	mutex_unlock(&map->freeze_mutex);
+	if (err)
+		return err;
 
 	/* set default open/close callbacks */
 	vma->vm_ops = &bpf_map_default_vmops;
 	vma->vm_private_data = map;
 	vm_flags_clear(vma, VM_MAYEXEC);
+	/* If mapping is read-only, then disallow potentially re-mapping with
+	 * PROT_WRITE by dropping VM_MAYWRITE flag. This VM_MAYWRITE clearing
+	 * means that as far as BPF map's memory-mapped VMAs are concerned,
+	 * VM_WRITE and VM_MAYWRITE are equivalent: if one of them is set,
+	 * both should be set, so we can forget about VM_MAYWRITE and always
+	 * check just VM_WRITE
+	 */
 	if (!(vma->vm_flags & VM_WRITE))
-		/* disallow re-mapping with PROT_WRITE */
 		vm_flags_clear(vma, VM_MAYWRITE);
 
 	err = map->ops->map_mmap(map, vma);
-	if (err)
-		goto out;
+	if (err) {
+		if (vma->vm_flags & VM_WRITE)
+			bpf_map_write_active_dec(map);
+	}
 
-	if (vma->vm_flags & VM_MAYWRITE)
-		bpf_map_write_active_inc(map);
-out:
-	mutex_unlock(&map->freeze_mutex);
 	return err;
 }
 
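For context, here is a minimal user-space sketch (not part of the patch) of the interaction the hunk above preserves. It assumes libbpf's bpf_map_create()/bpf_map_freeze(), a BPF_F_MMAPABLE array map, and CAP_BPF; the map name, sizes, and error handling are illustrative. A shared writable mapping bumps the map's write-active count, so BPF_MAP_FREEZE is refused with -EBUSY until that mapping is gone; with this change the increment now happens under freeze_mutex before ->map_mmap() runs, and is dropped again if ->map_mmap() fails.

#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>
#include <bpf/bpf.h>

int main(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
	/* 1024 * 4-byte values == one page of mmapable map data */
	int map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "mmap_demo",
				    sizeof(__u32), sizeof(__u32), 1024, &opts);
	if (map_fd < 0)
		return 1;

	/* writable shared mapping: kernel bumps the write-active count */
	void *data = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
			  map_fd, 0);
	if (data == MAP_FAILED)
		return 1;

	/* expected to fail with EBUSY while a writable mapping exists */
	if (bpf_map_freeze(map_fd))
		perror("bpf_map_freeze");

	munmap(data, 4096);
	close(map_fd);
	return 0;
}
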
@@ -1927,8 +1936,6 @@ int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
 	return err;
 }
 
-#define MAP_LOOKUP_RETRIES 3
-
 int generic_map_lookup_batch(struct bpf_map *map,
 			     const union bpf_attr *attr,
 			     union bpf_attr __user *uattr)
@@ -1938,8 +1945,8 @@ int generic_map_lookup_batch(struct bpf_map *map,
 	void __user *values = u64_to_user_ptr(attr->batch.values);
 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
 	void *buf, *buf_prevkey, *prev_key, *key, *value;
-	int err, retry = MAP_LOOKUP_RETRIES;
 	u32 value_size, cp, max_count;
+	int err;
 
 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
 		return -EINVAL;
@@ -1985,14 +1992,8 @@ int generic_map_lookup_batch(struct bpf_map *map,
 		err = bpf_map_copy_value(map, key, value,
 					 attr->batch.elem_flags);
 
-		if (err == -ENOENT) {
-			if (retry) {
-				retry--;
-				continue;
-			}
-			err = -EINTR;
-			break;
-		}
+		if (err == -ENOENT)
+			goto next_key;
 
 		if (err)
 			goto free_buf;
@@ -2007,12 +2008,12 @@ int generic_map_lookup_batch(struct bpf_map *map,
 			goto free_buf;
 		}
 
+		cp++;
+next_key:
 		if (!prev_key)
 			prev_key = buf_prevkey;
 
 		swap(prev_key, key);
-		retry = MAP_LOOKUP_RETRIES;
-		cp++;
 		cond_resched();
 	}
 
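As a usage sketch (not part of the patch), the loop below drains a map with libbpf's bpf_map_lookup_batch(); it assumes a map with 4-byte keys and values, and the helper name dump_map() is made up. With the change above, a key that disappears between the key walk and the value lookup is simply skipped and not counted, instead of the whole call eventually failing with -EINTR, so callers no longer need retry logic:

#include <errno.h>
#include <stdio.h>
#include <stdbool.h>
#include <bpf/bpf.h>

static int dump_map(int map_fd)
{
	LIBBPF_OPTS(bpf_map_batch_opts, opts);
	__u32 keys[64], vals[64];
	__u32 in_batch, out_batch;
	bool first = true;
	int err;

	for (;;) {
		__u32 count = 64;

		err = bpf_map_lookup_batch(map_fd, first ? NULL : &in_batch,
					   &out_batch, keys, vals, &count, &opts);
		if (err && errno != ENOENT)
			return err;	/* real failure */

		/* count holds the number of elements actually copied */
		for (__u32 i = 0; i < count; i++)
			printf("%u -> %u\n", keys[i], vals[i]);

		if (err)		/* ENOENT: reached the end of the map */
			return 0;

		in_batch = out_batch;
		first = false;
	}
}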