@@ -124,8 +124,8 @@ int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
 	return err;
 }
 
-static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp,
-				       u64 iova, int length, u32 perm)
+static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp, u64 iova,
+				       int length)
 {
 	bool need_fault = false;
 	u64 addr;
@@ -137,7 +137,7 @@ static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp,
 	while (addr < iova + length) {
 		idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
 
-		if (!(umem_odp->map.pfn_list[idx] & perm)) {
+		if (!(umem_odp->map.pfn_list[idx] & HMM_PFN_VALID)) {
 			need_fault = true;
 			break;
 		}
@@ -161,18 +161,14 @@ static int rxe_odp_map_range_and_lock(struct rxe_mr *mr, u64 iova, int length, u
 {
 	struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
 	bool need_fault;
-	u64 perm = 0;
 	int err;
 
 	if (unlikely(length < 1))
 		return -EINVAL;
 
-	if (!(flags & RXE_PAGEFAULT_RDONLY))
-		perm |= HMM_PFN_WRITE;
-
 	mutex_lock(&umem_odp->umem_mutex);
 
-	need_fault = rxe_check_pagefault(umem_odp, iova, length, perm);
+	need_fault = rxe_check_pagefault(umem_odp, iova, length);
 	if (need_fault) {
 		mutex_unlock(&umem_odp->umem_mutex);
 
@@ -182,7 +178,7 @@ static int rxe_odp_map_range_and_lock(struct rxe_mr *mr, u64 iova, int length, u
 		if (err < 0)
 			return err;
 
-		need_fault = rxe_check_pagefault(umem_odp, iova, length, perm);
+		need_fault = rxe_check_pagefault(umem_odp, iova, length);
 		if (need_fault)
 			return -EFAULT;
 	}
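
Taken together, the hunks reduce the pre-fault check to a pure page-presence scan: rxe_check_pagefault() no longer builds a perm mask from RXE_PAGEFAULT_RDONLY/HMM_PFN_WRITE, and instead tests each pfn_list entry for HMM_PFN_VALID only. As a rough illustration of the resulting logic, here is a minimal userspace sketch; struct odp_mock and check_pagefault() are invented stand-ins for the kernel's struct ib_umem_odp and rxe_check_pagefault(), and HMM_PFN_VALID is mocked rather than taken from include/linux/hmm.h.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mocked stand-in for the kernel's HMM_PFN_VALID flag (the real
 * definition lives in include/linux/hmm.h). */
#define HMM_PFN_VALID (1ULL << 63)

/* Invented, simplified stand-in for struct ib_umem_odp. */
struct odp_mock {
	uint64_t start;          /* first VA covered (ib_umem_start()) */
	unsigned int page_shift; /* log2 of the ODP page size */
	uint64_t *pfn_list;      /* one pfn/flags word per page */
};

/* Mirrors the patched rxe_check_pagefault(): a fault is needed as
 * soon as any page in [iova, iova + length) lacks HMM_PFN_VALID;
 * write permission is no longer consulted here. */
static bool check_pagefault(const struct odp_mock *odp, uint64_t iova,
			    int length)
{
	uint64_t page_size = (uint64_t)1 << odp->page_shift;
	uint64_t addr = iova & ~(page_size - 1);

	while (addr < iova + length) {
		size_t idx = (addr - odp->start) >> odp->page_shift;

		if (!(odp->pfn_list[idx] & HMM_PFN_VALID))
			return true; /* page absent: fault required */

		addr += page_size;
	}
	return false;
}

int main(void)
{
	/* Pages 0 and 1 are mapped; page 2 has never been faulted in. */
	uint64_t pfns[3] = { HMM_PFN_VALID, HMM_PFN_VALID, 0 };
	struct odp_mock odp = {
		.start = 0x1000, .page_shift = 12, .pfn_list = pfns,
	};

	printf("pages 0-1: %d\n", check_pagefault(&odp, 0x1000, 8192)); /* 0 */
	printf("page 2:    %d\n", check_pagefault(&odp, 0x3000, 16));   /* 1 */
	return 0;
}

Compiled as-is, the first call reports no fault (both pages carry the valid bit) and the second reports one (page 2 is unmapped), matching the path where the second rxe_check_pagefault() call returns true and the function fails with -EFAULT.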