@@ -5606,79 +5606,6 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 }
 #endif /* __PAGETABLE_PMD_FOLDED */
 
-/**
- * follow_pte - look up PTE at a user virtual address
- * @vma: the memory mapping
- * @address: user virtual address
- * @ptepp: location to store found PTE
- * @ptlp: location to store the lock for the PTE
- *
- * On a successful return, the pointer to the PTE is stored in @ptepp;
- * the corresponding lock is taken and its location is stored in @ptlp.
- *
- * The contents of the PTE are only stable until @ptlp is released using
- * pte_unmap_unlock(). This function will fail if the PTE is non-present.
- * Present PTEs may include PTEs that map refcounted pages, such as
- * anonymous folios in COW mappings.
- *
- * Callers must be careful when relying on PTE content after
- * pte_unmap_unlock(). Especially if the PTE maps a refcounted page,
- * callers must protect against invalidation with MMU notifiers; otherwise
- * access to the PFN at a later point in time can trigger use-after-free.
- *
- * Only IO mappings and raw PFN mappings are allowed. The mmap semaphore
- * should be taken for read.
- *
- * This function must not be used to modify PTE content.
- *
- * Return: zero on success, -ve otherwise.
- */
-int follow_pte(struct vm_area_struct *vma, unsigned long address,
-	       pte_t **ptepp, spinlock_t **ptlp)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	pgd_t *pgd;
-	p4d_t *p4d;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *ptep;
-
-	mmap_assert_locked(mm);
-	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
-		goto out;
-
-	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
-		goto out;
-
-	pgd = pgd_offset(mm, address);
-	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-		goto out;
-
-	p4d = p4d_offset(pgd, address);
-	if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
-		goto out;
-
-	pud = pud_offset(p4d, address);
-	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-		goto out;
-
-	pmd = pmd_offset(pud, address);
-	VM_BUG_ON(pmd_trans_huge(*pmd));
-
-	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
-	if (!ptep)
-		goto out;
-	if (!pte_present(ptep_get(ptep)))
-		goto unlock;
-	*ptepp = ptep;
-	return 0;
-unlock:
-	pte_unmap_unlock(ptep, *ptlp);
-out:
-	return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(follow_pte);
-
 static inline void pfnmap_args_setup(struct follow_pfnmap_args *args,
				      spinlock_t *lock, pte_t *ptep,
				      pgprot_t pgprot, unsigned long pfn_base,
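For context on what is removed here: a typical caller of follow_pte() followed the rules spelled out in its kernel-doc (mmap lock held for read, VM_IO/VM_PFNMAP mapping only, PTE read while the returned lock is held). The sketch below is a minimal illustration of that pattern, not code from this commit; example_read_pfn() is a hypothetical name.

```c
/*
 * Minimal caller sketch for the removed follow_pte(), per its kernel-doc.
 * example_read_pfn() is a hypothetical helper, not part of this commit.
 */
static int example_read_pfn(struct vm_area_struct *vma, unsigned long addr,
			    unsigned long *pfn)
{
	spinlock_t *ptl;
	pte_t *ptep;
	int ret;

	mmap_assert_locked(vma->vm_mm);	/* caller holds mmap_read_lock() */

	ret = follow_pte(vma, addr, &ptep, &ptl);
	if (ret)
		return ret;

	/* PTE content is only stable until pte_unmap_unlock(). */
	*pfn = pte_pfn(ptep_get(ptep));

	pte_unmap_unlock(ptep, ptl);
	return 0;
}
```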
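The surviving context line introduces pfnmap_args_setup(), a helper for the follow_pfnmap_args-based lookup that takes over from follow_pte(). Assuming a follow_pfnmap_start()/follow_pfnmap_end() pair with vma/address inputs and a pfn output field (an assumption about the replacement interface, not shown in this hunk), the same lookup would look roughly like:

```c
/*
 * Rough equivalent using the follow_pfnmap-style API. Function and field
 * names here are assumptions about the replacement interface and are not
 * taken from this diff.
 */
static int example_read_pfn(struct vm_area_struct *vma, unsigned long addr,
			    unsigned long *pfn)
{
	struct follow_pfnmap_args args = {
		.vma = vma,
		.address = addr,
	};
	int ret;

	ret = follow_pfnmap_start(&args);
	if (ret)
		return ret;

	*pfn = args.pfn;	/* only stable until follow_pfnmap_end() */

	follow_pfnmap_end(&args);
	return 0;
}
```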