Skip to content

Commit 5d3341d

Browse files
committed
KVM: guest_memfd: Invalidate SHARED GPAs if gmem supports INIT_SHARED
When invalidating gmem ranges, e.g. in response to PUNCH_HOLE, process all possible range types (PRIVATE vs. SHARED) for the gmem instance. Since guest_memfd doesn't yet support in-place conversions, simply pivot on INIT_SHARED, as a gmem instance can currently only have private or shared memory, not both. Failure to mark shared GPAs for invalidation is benign in the current code base, as only x86's TDX consumes KVM_FILTER_{PRIVATE,SHARED}, and TDX doesn't yet support INIT_SHARED with guest_memfd. However, invalidating only private GPAs is conceptually wrong and a lurking bug, e.g. could result in missed invalidations if ARM starts filtering invalidations based on attributes. Fixes: 3d3a04f ("KVM: Allow and advertise support for host mmap() on guest_memfd files") Reviewed-by: Ackerley Tng <ackerleytng@google.com> Reviewed-by: David Hildenbrand <david@redhat.com> Link: https://lore.kernel.org/r/20251003232606.4070510-4-seanjc@google.com Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent fe2bf62 commit 5d3341d

File tree

1 file changed

+44
-20
lines changed

1 file changed

+44
-20
lines changed

virt/kvm/guest_memfd.c

Lines changed: 44 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -102,8 +102,17 @@ static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
102102
return filemap_grab_folio(inode->i_mapping, index);
103103
}
104104

105-
static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
106-
pgoff_t end)
105+
static enum kvm_gfn_range_filter kvm_gmem_get_invalidate_filter(struct inode *inode)
106+
{
107+
if ((u64)inode->i_private & GUEST_MEMFD_FLAG_INIT_SHARED)
108+
return KVM_FILTER_SHARED;
109+
110+
return KVM_FILTER_PRIVATE;
111+
}
112+
113+
static void __kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
114+
pgoff_t end,
115+
enum kvm_gfn_range_filter attr_filter)
107116
{
108117
bool flush = false, found_memslot = false;
109118
struct kvm_memory_slot *slot;
@@ -118,8 +127,7 @@ static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
118127
.end = slot->base_gfn + min(pgoff + slot->npages, end) - pgoff,
119128
.slot = slot,
120129
.may_block = true,
121-
/* guest memfd is relevant to only private mappings. */
122-
.attr_filter = KVM_FILTER_PRIVATE,
130+
.attr_filter = attr_filter,
123131
};
124132

125133
if (!found_memslot) {
@@ -139,8 +147,21 @@ static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
139147
KVM_MMU_UNLOCK(kvm);
140148
}
141149

142-
static void kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,
143-
pgoff_t end)
150+
/*
 * Begin invalidation of [start, end) for every gmem instance bound to
 * @inode, i.e. for every VM that has the backing file attached.  The
 * PRIVATE vs. SHARED filter is derived from the inode so that shared GPAs
 * are invalidated for INIT_SHARED instances, not just private GPAs.
 */
static void kvm_gmem_invalidate_begin(struct inode *inode, pgoff_t start,
				      pgoff_t end)
{
	struct list_head *gmem_list = &inode->i_mapping->i_private_list;
	enum kvm_gfn_range_filter attr_filter;
	struct kvm_gmem *gmem;

	/* The filter is per-inode; all bindings share the same memory type. */
	attr_filter = kvm_gmem_get_invalidate_filter(inode);

	list_for_each_entry(gmem, gmem_list, entry)
		__kvm_gmem_invalidate_begin(gmem, start, end, attr_filter);
}
162+
163+
static void __kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,
164+
pgoff_t end)
144165
{
145166
struct kvm *kvm = gmem->kvm;
146167

@@ -151,26 +172,32 @@ static void kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,
151172
}
152173
}
153174

154-
static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
175+
/*
 * Complete an invalidation started by kvm_gmem_invalidate_begin(), for
 * every gmem instance bound to @inode.  Begin/end calls must be balanced,
 * so the binding list must be stable across the pair (the caller holds the
 * filemap invalidate lock).
 */
static void kvm_gmem_invalidate_end(struct inode *inode, pgoff_t start,
				    pgoff_t end)
{
	struct list_head *gmem_list = &inode->i_mapping->i_private_list;
	struct kvm_gmem *gmem;

	list_for_each_entry(gmem, gmem_list, entry)
		__kvm_gmem_invalidate_end(gmem, start, end);
}
184+
185+
static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
186+
{
157187
pgoff_t start = offset >> PAGE_SHIFT;
158188
pgoff_t end = (offset + len) >> PAGE_SHIFT;
159-
struct kvm_gmem *gmem;
160189

161190
/*
162191
* Bindings must be stable across invalidation to ensure the start+end
163192
* are balanced.
164193
*/
165194
filemap_invalidate_lock(inode->i_mapping);
166195

167-
list_for_each_entry(gmem, gmem_list, entry)
168-
kvm_gmem_invalidate_begin(gmem, start, end);
196+
kvm_gmem_invalidate_begin(inode, start, end);
169197

170198
truncate_inode_pages_range(inode->i_mapping, offset, offset + len - 1);
171199

172-
list_for_each_entry(gmem, gmem_list, entry)
173-
kvm_gmem_invalidate_end(gmem, start, end);
200+
kvm_gmem_invalidate_end(inode, start, end);
174201

175202
filemap_invalidate_unlock(inode->i_mapping);
176203

@@ -280,8 +307,9 @@ static int kvm_gmem_release(struct inode *inode, struct file *file)
280307
* Zap all SPTEs pointed at by this file. Do not free the backing
281308
* memory, as its lifetime is associated with the inode, not the file.
282309
*/
283-
kvm_gmem_invalidate_begin(gmem, 0, -1ul);
284-
kvm_gmem_invalidate_end(gmem, 0, -1ul);
310+
__kvm_gmem_invalidate_begin(gmem, 0, -1ul,
311+
kvm_gmem_get_invalidate_filter(inode));
312+
__kvm_gmem_invalidate_end(gmem, 0, -1ul);
285313

286314
list_del(&gmem->entry);
287315

@@ -403,17 +431,14 @@ static int kvm_gmem_migrate_folio(struct address_space *mapping,
403431

404432
static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *folio)
405433
{
406-
struct list_head *gmem_list = &mapping->i_private_list;
407-
struct kvm_gmem *gmem;
408434
pgoff_t start, end;
409435

410436
filemap_invalidate_lock_shared(mapping);
411437

412438
start = folio->index;
413439
end = start + folio_nr_pages(folio);
414440

415-
list_for_each_entry(gmem, gmem_list, entry)
416-
kvm_gmem_invalidate_begin(gmem, start, end);
441+
kvm_gmem_invalidate_begin(mapping->host, start, end);
417442

418443
/*
419444
* Do not truncate the range, what action is taken in response to the
@@ -424,8 +449,7 @@ static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *fol
424449
* error to userspace.
425450
*/
426451

427-
list_for_each_entry(gmem, gmem_list, entry)
428-
kvm_gmem_invalidate_end(gmem, start, end);
452+
kvm_gmem_invalidate_end(mapping->host, start, end);
429453

430454
filemap_invalidate_unlock_shared(mapping);
431455

0 commit comments

Comments
 (0)