Skip to content

Commit f71f7af

Browse files
vdonnefort authored and Marc Zyngier committed
KVM: arm64: Check range args for pKVM mem transitions
There's currently no verification for host issued ranges in most of the pKVM memory transitions. The end boundary might therefore be subject to overflow and later checks could be evaded. Close this loophole with an additional pfn_range_is_valid() check on a per public function basis. Once this check has passed, it is safe to convert pfn and nr_pages into a phys_addr_t and a size. host_unshare_guest transition is already protected via __check_host_shared_guest(), while assert_host_shared_guest() callers are already ignoring host checks. Signed-off-by: Vincent Donnefort <vdonnefort@google.com> Link: https://patch.msgid.link/20251016164541.3771235-1-vdonnefort@google.com Signed-off-by: Marc Zyngier <maz@kernel.org>
1 parent a186fbc commit f71f7af

File tree

1 file changed

+28
-0
lines changed

1 file changed

+28
-0
lines changed

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -367,6 +367,19 @@ static int host_stage2_unmap_dev_all(void)
367367
return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr);
368368
}
369369

370+
/*
371+
* Ensure the PFN range is contained within PA-range.
372+
*
373+
* This check is also robust to overflows and is therefore a requirement before
374+
* using a pfn/nr_pages pair from an untrusted source.
375+
*/
376+
static bool pfn_range_is_valid(u64 pfn, u64 nr_pages)
377+
{
378+
u64 limit = BIT(kvm_phys_shift(&host_mmu.arch.mmu) - PAGE_SHIFT);
379+
380+
return pfn < limit && ((limit - pfn) >= nr_pages);
381+
}
382+
370383
struct kvm_mem_range {
371384
u64 start;
372385
u64 end;
@@ -776,6 +789,9 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
776789
void *virt = __hyp_va(phys);
777790
int ret;
778791

792+
if (!pfn_range_is_valid(pfn, nr_pages))
793+
return -EINVAL;
794+
779795
host_lock_component();
780796
hyp_lock_component();
781797

@@ -804,6 +820,9 @@ int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
804820
u64 virt = (u64)__hyp_va(phys);
805821
int ret;
806822

823+
if (!pfn_range_is_valid(pfn, nr_pages))
824+
return -EINVAL;
825+
807826
host_lock_component();
808827
hyp_lock_component();
809828

@@ -887,6 +906,9 @@ int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages)
887906
u64 size = PAGE_SIZE * nr_pages;
888907
int ret;
889908

909+
if (!pfn_range_is_valid(pfn, nr_pages))
910+
return -EINVAL;
911+
890912
host_lock_component();
891913
ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
892914
if (!ret)
@@ -902,6 +924,9 @@ int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages)
902924
u64 size = PAGE_SIZE * nr_pages;
903925
int ret;
904926

927+
if (!pfn_range_is_valid(pfn, nr_pages))
928+
return -EINVAL;
929+
905930
host_lock_component();
906931
ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
907932
if (!ret)
@@ -945,6 +970,9 @@ int __pkvm_host_share_guest(u64 pfn, u64 gfn, u64 nr_pages, struct pkvm_hyp_vcpu
945970
if (prot & ~KVM_PGTABLE_PROT_RWX)
946971
return -EINVAL;
947972

973+
if (!pfn_range_is_valid(pfn, nr_pages))
974+
return -EINVAL;
975+
948976
ret = __guest_check_transition_size(phys, ipa, nr_pages, &size);
949977
if (ret)
950978
return ret;

0 commit comments

Comments
 (0)