Skip to content

Commit 8c5fa37

Browse files
fangyu0809 authored and avpatel committed
RISC-V: KVM: Remove automatic I/O mapping for VM_PFNMAP
As of commit aac6db7 ("vfio/pci: Use unmap_mapping_range()"), vm_pgoff may no longer be guaranteed to hold the PFN for VM_PFNMAP regions. Using vma->vm_pgoff to derive the HPA here may therefore produce incorrect mappings. Instead, I/O mappings for such regions can be established on-demand during g-stage page faults, making the upfront ioremap in this path unnecessary. Fixes: aac6db7 ("vfio/pci: Use unmap_mapping_range()") Signed-off-by: Fangyu Yu <fangyu.yu@linux.alibaba.com> Tested-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com> Reviewed-by: Guo Ren <guoren@kernel.org> Reviewed-by: Anup Patel <anup@brainfault.org> Link: https://lore.kernel.org/r/20251021142131.78796-1-fangyu.yu@linux.alibaba.com Signed-off-by: Anup Patel <anup@brainfault.org>
1 parent 873f10c commit 8c5fa37

File tree

1 file changed

+2
-23
lines changed

1 file changed

+2
-23
lines changed

arch/riscv/kvm/mmu.c

Lines changed: 2 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -171,7 +171,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
171171
enum kvm_mr_change change)
172172
{
173173
hva_t hva, reg_end, size;
174-
gpa_t base_gpa;
175174
bool writable;
176175
int ret = 0;
177176

@@ -190,15 +189,13 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
190189
hva = new->userspace_addr;
191190
size = new->npages << PAGE_SHIFT;
192191
reg_end = hva + size;
193-
base_gpa = new->base_gfn << PAGE_SHIFT;
194192
writable = !(new->flags & KVM_MEM_READONLY);
195193

196194
mmap_read_lock(current->mm);
197195

198196
/*
199197
* A memory region could potentially cover multiple VMAs, and
200-
* any holes between them, so iterate over all of them to find
201-
* out if we can map any of them right now.
198+
* any holes between them, so iterate over all of them.
202199
*
203200
* +--------------------------------------------+
204201
* +---------------+----------------+ +----------------+
@@ -209,7 +206,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
209206
*/
210207
do {
211208
struct vm_area_struct *vma;
212-
hva_t vm_start, vm_end;
209+
hva_t vm_end;
213210

214211
vma = find_vma_intersection(current->mm, hva, reg_end);
215212
if (!vma)
@@ -225,36 +222,18 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
225222
}
226223

227224
/* Take the intersection of this VMA with the memory region */
228-
vm_start = max(hva, vma->vm_start);
229225
vm_end = min(reg_end, vma->vm_end);
230226

231227
if (vma->vm_flags & VM_PFNMAP) {
232-
gpa_t gpa = base_gpa + (vm_start - hva);
233-
phys_addr_t pa;
234-
235-
pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
236-
pa += vm_start - vma->vm_start;
237-
238228
/* IO region dirty page logging not allowed */
239229
if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
240230
ret = -EINVAL;
241231
goto out;
242232
}
243-
244-
ret = kvm_riscv_mmu_ioremap(kvm, gpa, pa, vm_end - vm_start,
245-
writable, false);
246-
if (ret)
247-
break;
248233
}
249234
hva = vm_end;
250235
} while (hva < reg_end);
251236

252-
if (change == KVM_MR_FLAGS_ONLY)
253-
goto out;
254-
255-
if (ret)
256-
kvm_riscv_mmu_iounmap(kvm, base_gpa, size);
257-
258237
out:
259238
mmap_read_unlock(current->mm);
260239
return ret;

0 commit comments

Comments (0)