@@ -283,7 +283,7 @@ static u64 __pkvm_mapping_start(struct pkvm_mapping *m)
 
 static u64 __pkvm_mapping_end(struct pkvm_mapping *m)
 {
-	return (m->gfn + 1) * PAGE_SIZE - 1;
+	return (m->gfn + m->nr_pages) * PAGE_SIZE - 1;
 }
 
 INTERVAL_TREE_DEFINE(struct pkvm_mapping, node, u64, __subtree_last,
@@ -324,7 +324,8 @@ static int __pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 start, u64 e
 		return 0;
 
 	for_each_mapping_in_range_safe(pgt, start, end, mapping) {
-		ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn, 1);
+		ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn,
+					mapping->nr_pages);
 		if (WARN_ON(ret))
			return ret;
		pkvm_mapping_remove(mapping, &pgt->pkvm_mappings);
@@ -354,16 +355,32 @@ int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
 		return -EINVAL;
 
 	lockdep_assert_held_write(&kvm->mmu_lock);
-	ret = kvm_call_hyp_nvhe(__pkvm_host_share_guest, pfn, gfn, 1, prot);
-	if (ret) {
-		/* Is the gfn already mapped due to a racing vCPU? */
-		if (ret == -EPERM)
+
+	/*
+	 * Calling stage2_map() on top of existing mappings is either happening because of a race
+	 * with another vCPU, or because we're changing between page and block mappings. As per
+	 * user_mem_abort(), same-size permission faults are handled in the relax_perms() path.
+	 */
+	mapping = pkvm_mapping_iter_first(&pgt->pkvm_mappings, addr, addr + size - 1);
+	if (mapping) {
+		if (size == (mapping->nr_pages * PAGE_SIZE))
 			return -EAGAIN;
+
+		/* Remove _any_ pkvm_mapping overlapping with the range, bigger or smaller. */
+		ret = __pkvm_pgtable_stage2_unmap(pgt, addr, addr + size);
+		if (ret)
+			return ret;
+		mapping = NULL;
 	}
 
+	ret = kvm_call_hyp_nvhe(__pkvm_host_share_guest, pfn, gfn, size / PAGE_SIZE, prot);
+	if (WARN_ON(ret))
+		return ret;
+
 	swap(mapping, cache->mapping);
 	mapping->gfn = gfn;
 	mapping->pfn = pfn;
+	mapping->nr_pages = size / PAGE_SIZE;
 	pkvm_mapping_insert(mapping, &pgt->pkvm_mappings);
 
 	return ret;
@@ -385,7 +402,8 @@ int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
 
 	lockdep_assert_held(&kvm->mmu_lock);
 	for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping) {
-		ret = kvm_call_hyp_nvhe(__pkvm_host_wrprotect_guest, handle, mapping->gfn, 1);
+		ret = kvm_call_hyp_nvhe(__pkvm_host_wrprotect_guest, handle, mapping->gfn,
+					mapping->nr_pages);
 		if (WARN_ON(ret))
 			break;
 	}
@@ -400,7 +418,8 @@ int pkvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
 
 	lockdep_assert_held(&kvm->mmu_lock);
 	for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping)
-		__clean_dcache_guest_page(pfn_to_kaddr(mapping->pfn), PAGE_SIZE);
+		__clean_dcache_guest_page(pfn_to_kaddr(mapping->pfn),
+					  PAGE_SIZE * mapping->nr_pages);
 
 	return 0;
 }
@@ -415,7 +434,7 @@ bool pkvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, u64
 	lockdep_assert_held(&kvm->mmu_lock);
 	for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping)
 		young |= kvm_call_hyp_nvhe(__pkvm_host_test_clear_young_guest, handle, mapping->gfn,
-					   1, mkold);
+					   mapping->nr_pages, mkold);
 
 	return young;
 }
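
For reference, here is a minimal sketch of the struct pkvm_mapping layout assumed by the hunks above: the diff reads and writes a new nr_pages field alongside gfn and pfn, and the INTERVAL_TREE_DEFINE() invocation requires the node and __subtree_last members. The field types below are inferred from how the fields are used in this patch, not copied from the in-tree definition, so treat them as an assumption.

struct pkvm_mapping {
	struct rb_node	node;		/* interval tree linkage used by pkvm_mapping_insert/remove */
	u64		gfn;		/* first guest frame covered by the mapping */
	u64		pfn;		/* first host frame backing it */
	u64		nr_pages;	/* new in this patch: pages per mapping, replacing the hardcoded 1 */
	u64		__subtree_last;	/* cached subtree end maintained by the interval tree */
};

With such a layout, __pkvm_mapping_end() returns the inclusive end address of the whole run, (gfn + nr_pages) * PAGE_SIZE - 1, which is what allows a single interval-tree node to describe a block mapping rather than one node per page.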