@@ -298,30 +298,18 @@ EXPORT_SYMBOL(ib_umem_odp_release);
 static int ib_umem_odp_map_dma_single_page(
 		struct ib_umem_odp *umem_odp,
 		unsigned int dma_index,
-		struct page *page,
-		u64 access_mask)
+		struct page *page)
 {
 	struct ib_device *dev = umem_odp->umem.ibdev;
 	dma_addr_t *dma_addr = &umem_odp->dma_list[dma_index];
 
-	if (*dma_addr) {
-		/*
-		 * If the page is already dma mapped it means it went through
-		 * a non-invalidating trasition, like read-only to writable.
-		 * Resync the flags.
-		 */
-		*dma_addr = (*dma_addr & ODP_DMA_ADDR_MASK) | access_mask;
-		return 0;
-	}
-
 	*dma_addr = ib_dma_map_page(dev, page, 0, 1 << umem_odp->page_shift,
 				    DMA_BIDIRECTIONAL);
 	if (ib_dma_mapping_error(dev, *dma_addr)) {
 		*dma_addr = 0;
 		return -EFAULT;
 	}
 	umem_odp->npages++;
-	*dma_addr |= access_mask;
 	return 0;
 }
 
@@ -357,9 +345,6 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
 	struct hmm_range range = {};
 	unsigned long timeout;
 
-	if (access_mask == 0)
-		return -EINVAL;
-
 	if (user_virt < ib_umem_start(umem_odp) ||
 	    user_virt + bcnt > ib_umem_end(umem_odp))
 		return -EFAULT;
@@ -385,7 +370,7 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
 	if (fault) {
 		range.default_flags = HMM_PFN_REQ_FAULT;
 
-		if (access_mask & ODP_WRITE_ALLOWED_BIT)
+		if (access_mask & HMM_PFN_WRITE)
 			range.default_flags |= HMM_PFN_REQ_WRITE;
 	}
 
@@ -417,22 +402,17 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
 	for (pfn_index = 0; pfn_index < num_pfns;
 		pfn_index += 1 << (page_shift - PAGE_SHIFT), dma_index++) {
 
-		if (fault) {
-			/*
-			 * Since we asked for hmm_range_fault() to populate
-			 * pages it shouldn't return an error entry on success.
-			 */
-			WARN_ON(range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
-			WARN_ON(!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
-		} else {
-			if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID)) {
-				WARN_ON(umem_odp->dma_list[dma_index]);
-				continue;
-			}
-			access_mask = ODP_READ_ALLOWED_BIT;
-			if (range.hmm_pfns[pfn_index] & HMM_PFN_WRITE)
-				access_mask |= ODP_WRITE_ALLOWED_BIT;
-		}
+		/*
+		 * Since we asked for hmm_range_fault() to populate
+		 * pages it shouldn't return an error entry on success.
+		 */
+		WARN_ON(fault && range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
+		WARN_ON(fault && !(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
+		if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID))
+			continue;
+
+		if (range.hmm_pfns[pfn_index] & HMM_PFN_DMA_MAPPED)
+			continue;
 
 		hmm_order = hmm_pfn_to_map_order(range.hmm_pfns[pfn_index]);
 		/* If a hugepage was detected and ODP wasn't set for, the umem
@@ -447,13 +427,14 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
 		}
 
 		ret = ib_umem_odp_map_dma_single_page(
-				umem_odp, dma_index, hmm_pfn_to_page(range.hmm_pfns[pfn_index]),
-				access_mask);
+				umem_odp, dma_index,
+				hmm_pfn_to_page(range.hmm_pfns[pfn_index]));
 		if (ret < 0) {
 			ibdev_dbg(umem_odp->umem.ibdev,
 				  "ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
 			break;
 		}
+		range.hmm_pfns[pfn_index] |= HMM_PFN_DMA_MAPPED;
 	}
 	/* upon success lock should stay on hold for the callee */
 	if (!ret)
@@ -473,7 +454,6 @@ EXPORT_SYMBOL(ib_umem_odp_map_dma_and_lock);
 void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
 				 u64 bound)
 {
-	dma_addr_t dma_addr;
 	dma_addr_t dma;
 	int idx;
 	u64 addr;
@@ -484,34 +464,37 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
 	virt = max_t(u64, virt, ib_umem_start(umem_odp));
 	bound = min_t(u64, bound, ib_umem_end(umem_odp));
 	for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) {
+		unsigned long pfn_idx = (addr - ib_umem_start(umem_odp)) >>
+					PAGE_SHIFT;
+		struct page *page =
+			hmm_pfn_to_page(umem_odp->pfn_list[pfn_idx]);
+
 		idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
 		dma = umem_odp->dma_list[idx];
 
-		/* The access flags guaranteed a valid DMA address in case was NULL */
-		if (dma) {
-			unsigned long pfn_idx = (addr - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
-			struct page *page = hmm_pfn_to_page(umem_odp->pfn_list[pfn_idx]);
-
-			dma_addr = dma & ODP_DMA_ADDR_MASK;
-			ib_dma_unmap_page(dev, dma_addr,
-					  BIT(umem_odp->page_shift),
-					  DMA_BIDIRECTIONAL);
-			if (dma & ODP_WRITE_ALLOWED_BIT) {
-				struct page *head_page = compound_head(page);
-				/*
-				 * set_page_dirty prefers being called with
-				 * the page lock. However, MMU notifiers are
-				 * called sometimes with and sometimes without
-				 * the lock. We rely on the umem_mutex instead
-				 * to prevent other mmu notifiers from
-				 * continuing and allowing the page mapping to
-				 * be removed.
-				 */
-				set_page_dirty(head_page);
-			}
-			umem_odp->dma_list[idx] = 0;
-			umem_odp->npages--;
+		if (!(umem_odp->pfn_list[pfn_idx] & HMM_PFN_VALID))
+			goto clear;
+		if (!(umem_odp->pfn_list[pfn_idx] & HMM_PFN_DMA_MAPPED))
+			goto clear;
+
+		ib_dma_unmap_page(dev, dma, BIT(umem_odp->page_shift),
+				  DMA_BIDIRECTIONAL);
+		if (umem_odp->pfn_list[pfn_idx] & HMM_PFN_WRITE) {
+			struct page *head_page = compound_head(page);
+			/*
+			 * set_page_dirty prefers being called with
+			 * the page lock. However, MMU notifiers are
+			 * called sometimes with and sometimes without
+			 * the lock. We rely on the umem_mutex instead
+			 * to prevent other mmu notifiers from
+			 * continuing and allowing the page mapping to
+			 * be removed.
+			 */
+			set_page_dirty(head_page);
 		}
+		umem_odp->npages--;
+clear:
+		umem_odp->pfn_list[pfn_idx] &= ~HMM_PFN_FLAGS;
 	}
 }
 EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
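
Note (not part of the patch): the diff above moves per-page bookkeeping out of dma_list, where the old code folded ODP_READ_ALLOWED_BIT / ODP_WRITE_ALLOWED_BIT into the low bits of each DMA address and stripped them with ODP_DMA_ADDR_MASK, and onto the hmm pfn_list entries, which now carry HMM_PFN_VALID, HMM_PFN_WRITE and HMM_PFN_DMA_MAPPED. The standalone C sketch below only models that change of shape; the *_SKETCH constants are illustrative stand-ins, not the real kernel definitions, and this is plain userspace code, not kernel code.

/*
 * Standalone sketch of the bookkeeping change above.
 * All *_SKETCH values are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

/* Old scheme: access bits packed into the low bits of the DMA address. */
#define ODP_READ_ALLOWED_BIT_SKETCH	(1ULL << 0)
#define ODP_WRITE_ALLOWED_BIT_SKETCH	(1ULL << 1)
#define ODP_DMA_ADDR_MASK_SKETCH \
	(~(ODP_READ_ALLOWED_BIT_SKETCH | ODP_WRITE_ALLOWED_BIT_SKETCH))

/* New scheme: state kept as flags on the hmm pfn entry instead. */
#define PFN_VALID_SKETCH	(1ULL << 61)
#define PFN_WRITE_SKETCH	(1ULL << 62)
#define PFN_DMA_MAPPED_SKETCH	(1ULL << 60)

int main(void)
{
	uint64_t dma = 0x10000ULL;

	/* Old: "mapped?" and "writable?" were answered from the dma_list entry. */
	uint64_t old_entry = dma | ODP_READ_ALLOWED_BIT_SKETCH |
			     ODP_WRITE_ALLOWED_BIT_SKETCH;
	printf("old: mapped=%d writable=%d addr=0x%llx\n",
	       old_entry != 0,
	       (old_entry & ODP_WRITE_ALLOWED_BIT_SKETCH) != 0,
	       (unsigned long long)(old_entry & ODP_DMA_ADDR_MASK_SKETCH));

	/* New: dma_list holds the bare address; the pfn entry carries the flags. */
	uint64_t pfn_entry = PFN_VALID_SKETCH | PFN_WRITE_SKETCH |
			     PFN_DMA_MAPPED_SKETCH;
	printf("new: mapped=%d writable=%d addr=0x%llx\n",
	       (pfn_entry & PFN_DMA_MAPPED_SKETCH) != 0,
	       (pfn_entry & PFN_WRITE_SKETCH) != 0,
	       (unsigned long long)dma);
	return 0;
}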