@@ -984,29 +984,42 @@ static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr,
984984 return - EINVAL ;
985985}
986986
987- /*
988- * track_pfn_copy is called when vma that is covering the pfnmap gets
989- * copied through copy_page_range().
990- *
991- * If the vma has a linear pfn mapping for the entire range, we get the prot
992- * from pte and reserve the entire vma range with single reserve_pfn_range call.
993- */
994- int track_pfn_copy (struct vm_area_struct * vma )
987+ int track_pfn_copy (struct vm_area_struct * dst_vma ,
988+ struct vm_area_struct * src_vma , unsigned long * pfn )
995989{
990+ const unsigned long vma_size = src_vma -> vm_end - src_vma -> vm_start ;
996991 resource_size_t paddr ;
997- unsigned long vma_size = vma -> vm_end - vma -> vm_start ;
998992 pgprot_t pgprot ;
993+ int rc ;
999994
1000- if (vma -> vm_flags & VM_PAT ) {
1001- if (get_pat_info (vma , & paddr , & pgprot ))
1002- return - EINVAL ;
1003- /* reserve the whole chunk covered by vma. */
1004- return reserve_pfn_range (paddr , vma_size , & pgprot , 1 );
1005- }
995+ if (!(src_vma -> vm_flags & VM_PAT ))
996+ return 0 ;
997+
998+ /*
999+ * Duplicate the PAT information for the dst VMA based on the src
1000+ * VMA.
1001+ */
1002+ if (get_pat_info (src_vma , & paddr , & pgprot ))
1003+ return - EINVAL ;
1004+ rc = reserve_pfn_range (paddr , vma_size , & pgprot , 1 );
1005+ if (rc )
1006+ return rc ;
10061007
1008+ /* Reservation for the destination VMA succeeded. */
1009+ vm_flags_set (dst_vma , VM_PAT );
1010+ * pfn = PHYS_PFN (paddr );
10071011 return 0 ;
10081012}
10091013
1014+ void untrack_pfn_copy (struct vm_area_struct * dst_vma , unsigned long pfn )
1015+ {
1016+ untrack_pfn (dst_vma , pfn , dst_vma -> vm_end - dst_vma -> vm_start , true);
1017+ /*
1018+ * Reservation was freed, any copied page tables will get cleaned
1019+ * up later, but without getting PAT involved again.
1020+ */
1021+ }
1022+
10101023/*
10111024 * prot is passed in as a parameter for the new mapping. If the vma has
10121025 * a linear pfn mapping for the entire range, or no vma is provided,
@@ -1095,15 +1108,6 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
10951108 }
10961109}
10971110
1098- /*
1099- * untrack_pfn_clear is called if the following situation fits:
1100- *
1101- * 1) while mremapping a pfnmap for a new region, with the old vma after
1102- * its pfnmap page table has been removed. The new vma has a new pfnmap
1103- * to the same pfn & cache type with VM_PAT set.
1104- * 2) while duplicating vm area, the new vma fails to copy the pgtable from
1105- * old vma.
1106- */
11071111void untrack_pfn_clear (struct vm_area_struct * vma )
11081112{
11091113 vm_flags_clear (vma , VM_PAT );