
Commit 61cee97

KVM: selftests: Add wrappers for mmap() and munmap() to assert success
Add and use wrappers for mmap() and munmap() that assert success to reduce
a significant amount of boilerplate code, to ensure all tests assert on
failure, and to provide consistent error messages on failure.

No functional change intended.

Reviewed-by: Fuad Tabba <tabba@google.com>
Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Ackerley Tng <ackerleytng@google.com>
Link: https://lore.kernel.org/r/20251003232606.4070510-10-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent df0d992 commit 61cee97
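For illustration only (not part of the diff): a minimal sketch of the pattern this commit applies at each call site. The function name touch_mapping() is made up for the example; TEST_ASSERT(), kvm_mmap() and kvm_munmap() are the selftests helpers shown in the kvm_util.h hunk below, and fd is assumed to be a memfd/guest_memfd of at least "size" bytes.

	/*
	 * Illustrative sketch: contrasts the open-coded style the commit
	 * removes with the wrapper-based style it introduces.
	 */
	static void touch_mapping(int fd, size_t size)
	{
		char *mem;
		int ret;

		/* Old style: every call site open-codes its own assertions. */
		mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		TEST_ASSERT(mem != MAP_FAILED, "mmap() should succeed.");
		memset(mem, 0xaa, size);
		ret = munmap(mem, size);
		TEST_ASSERT(!ret, "munmap() should succeed.");

		/*
		 * New style: the wrappers assert internally and report errno
		 * via __KVM_SYSCALL_ERROR(), so each call site is one line.
		 */
		mem = kvm_mmap(size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
		memset(mem, 0xaa, size);
		kvm_munmap(mem, size);
	}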

6 files changed, 64 insertions(+), 64 deletions(-)


tools/testing/selftests/kvm/guest_memfd_test.c

Lines changed: 7 additions & 14 deletions
@@ -50,8 +50,7 @@ static void test_mmap_supported(int fd, size_t total_size)
 	mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
 	TEST_ASSERT(mem == MAP_FAILED, "Copy-on-write not allowed by guest_memfd.");
 
-	mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-	TEST_ASSERT(mem != MAP_FAILED, "mmap() for guest_memfd should succeed.");
+	mem = kvm_mmap(total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
 
 	memset(mem, val, total_size);
 	for (i = 0; i < total_size; i++)
@@ -70,8 +69,7 @@ static void test_mmap_supported(int fd, size_t total_size)
 	for (i = 0; i < total_size; i++)
 		TEST_ASSERT_EQ(READ_ONCE(mem[i]), val);
 
-	ret = munmap(mem, total_size);
-	TEST_ASSERT(!ret, "munmap() should succeed.");
+	kvm_munmap(mem, total_size);
 }
 
 static sigjmp_buf jmpbuf;
@@ -89,10 +87,8 @@ static void test_fault_overflow(int fd, size_t total_size)
 	const char val = 0xaa;
 	char *mem;
 	size_t i;
-	int ret;
 
-	mem = mmap(NULL, map_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-	TEST_ASSERT(mem != MAP_FAILED, "mmap() for guest_memfd should succeed.");
+	mem = kvm_mmap(map_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
 
 	sigaction(SIGBUS, &sa_new, &sa_old);
 	if (sigsetjmp(jmpbuf, 1) == 0) {
@@ -104,8 +100,7 @@ static void test_fault_overflow(int fd, size_t total_size)
 	for (i = 0; i < total_size; i++)
 		TEST_ASSERT_EQ(READ_ONCE(mem[i]), val);
 
-	ret = munmap(mem, map_size);
-	TEST_ASSERT(!ret, "munmap() should succeed.");
+	kvm_munmap(mem, map_size);
 }
 
 static void test_mmap_not_supported(int fd, size_t total_size)
@@ -351,19 +346,17 @@ static void test_guest_memfd_guest(void)
 				    GUEST_MEMFD_FLAG_INIT_SHARED);
 	vm_set_user_memory_region2(vm, slot, KVM_MEM_GUEST_MEMFD, gpa, size, NULL, fd, 0);
 
-	mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-	TEST_ASSERT(mem != MAP_FAILED, "mmap() on guest_memfd failed");
+	mem = kvm_mmap(size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
 	memset(mem, 0xaa, size);
-	munmap(mem, size);
+	kvm_munmap(mem, size);
 
 	virt_pg_map(vm, gpa, gpa);
 	vcpu_args_set(vcpu, 2, gpa, size);
 	vcpu_run(vcpu);
 
 	TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
 
-	mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-	TEST_ASSERT(mem != MAP_FAILED, "mmap() on guest_memfd failed");
+	mem = kvm_mmap(size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
 	for (i = 0; i < size; i++)
 		TEST_ASSERT_EQ(mem[i], 0xff);
 

tools/testing/selftests/kvm/include/kvm_util.h

Lines changed: 25 additions & 0 deletions
@@ -286,6 +286,31 @@ static inline bool kvm_has_cap(long cap)
 #define __KVM_SYSCALL_ERROR(_name, _ret) \
 	"%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)
 
+static inline void *__kvm_mmap(size_t size, int prot, int flags, int fd,
+			       off_t offset)
+{
+	void *mem;
+
+	mem = mmap(NULL, size, prot, flags, fd, offset);
+	TEST_ASSERT(mem != MAP_FAILED, __KVM_SYSCALL_ERROR("mmap()",
+							   (int)(unsigned long)MAP_FAILED));
+
+	return mem;
+}
+
+static inline void *kvm_mmap(size_t size, int prot, int flags, int fd)
+{
+	return __kvm_mmap(size, prot, flags, fd, 0);
+}
+
+static inline void kvm_munmap(void *mem, size_t size)
+{
+	int ret;
+
+	ret = munmap(mem, size);
+	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
+}
+
 /*
  * Use the "inner", double-underscore macro when reporting errors from within
  * other macros so that the name of ioctl() and not its literal numeric value
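Brief usage note (editorial, not part of the patch): kvm_mmap() covers the common zero-offset case, while __kvm_mmap() exposes the file offset for mappings at a fixed page offset. Both shapes appear in the conversions further down, e.g. (lifted from the kvm_util.c hunks below):

	/* Zero-offset mapping of a vCPU's kvm_run structure. */
	vcpu->run = kvm_mmap(vcpu_mmap_sz(), PROT_READ | PROT_WRITE,
			     MAP_SHARED, vcpu->fd);

	/* Explicit offset, e.g. the dirty ring at KVM_DIRTY_LOG_PAGE_OFFSET. */
	addr = __kvm_mmap(size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd,
			  page_size * KVM_DIRTY_LOG_PAGE_OFFSET);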

tools/testing/selftests/kvm/lib/kvm_util.c

Lines changed: 15 additions & 29 deletions
@@ -741,13 +741,11 @@ static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
 	int ret;
 
 	if (vcpu->dirty_gfns) {
-		ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
-		TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
+		kvm_munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
 		vcpu->dirty_gfns = NULL;
 	}
 
-	ret = munmap(vcpu->run, vcpu_mmap_sz());
-	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
+	kvm_munmap(vcpu->run, vcpu_mmap_sz());
 
 	ret = close(vcpu->fd);
 	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
@@ -783,20 +781,16 @@ void kvm_vm_release(struct kvm_vm *vmp)
 static void __vm_mem_region_delete(struct kvm_vm *vm,
 				   struct userspace_mem_region *region)
 {
-	int ret;
-
 	rb_erase(&region->gpa_node, &vm->regions.gpa_tree);
 	rb_erase(&region->hva_node, &vm->regions.hva_tree);
 	hash_del(&region->slot_node);
 
 	sparsebit_free(&region->unused_phy_pages);
 	sparsebit_free(&region->protected_phy_pages);
-	ret = munmap(region->mmap_start, region->mmap_size);
-	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
+	kvm_munmap(region->mmap_start, region->mmap_size);
 	if (region->fd >= 0) {
 		/* There's an extra map when using shared memory. */
-		ret = munmap(region->mmap_alias, region->mmap_size);
-		TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
+		kvm_munmap(region->mmap_alias, region->mmap_size);
 		close(region->fd);
 	}
 	if (region->region.guest_memfd >= 0)
@@ -1053,12 +1047,9 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
 	region->fd = kvm_memfd_alloc(region->mmap_size,
 				     src_type == VM_MEM_SRC_SHARED_HUGETLB);
 
-	region->mmap_start = mmap(NULL, region->mmap_size,
-				  PROT_READ | PROT_WRITE,
-				  vm_mem_backing_src_alias(src_type)->flag,
-				  region->fd, 0);
-	TEST_ASSERT(region->mmap_start != MAP_FAILED,
-		    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
+	region->mmap_start = kvm_mmap(region->mmap_size, PROT_READ | PROT_WRITE,
+				      vm_mem_backing_src_alias(src_type)->flag,
+				      region->fd);
 
 	TEST_ASSERT(!is_backing_src_hugetlb(src_type) ||
 		    region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz),
@@ -1129,12 +1120,10 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
 
 	/* If shared memory, create an alias. */
 	if (region->fd >= 0) {
-		region->mmap_alias = mmap(NULL, region->mmap_size,
-					  PROT_READ | PROT_WRITE,
-					  vm_mem_backing_src_alias(src_type)->flag,
-					  region->fd, 0);
-		TEST_ASSERT(region->mmap_alias != MAP_FAILED,
-			    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
+		region->mmap_alias = kvm_mmap(region->mmap_size,
+					      PROT_READ | PROT_WRITE,
+					      vm_mem_backing_src_alias(src_type)->flag,
+					      region->fd);
 
 		/* Align host alias address */
 		region->host_alias = align_ptr_up(region->mmap_alias, alignment);
@@ -1344,10 +1333,8 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 	TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->run), "vcpu mmap size "
 		    "smaller than expected, vcpu_mmap_sz: %zi expected_min: %zi",
 		    vcpu_mmap_sz(), sizeof(*vcpu->run));
-	vcpu->run = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(),
-		PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
-	TEST_ASSERT(vcpu->run != MAP_FAILED,
-		    __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
+	vcpu->run = kvm_mmap(vcpu_mmap_sz(), PROT_READ | PROT_WRITE,
+			     MAP_SHARED, vcpu->fd);
 
 	if (kvm_has_cap(KVM_CAP_BINARY_STATS_FD))
 		vcpu->stats.fd = vcpu_get_stats_fd(vcpu);
@@ -1794,9 +1781,8 @@ void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu)
 		    page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
 	TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec");
 
-	addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd,
-		    page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
-	TEST_ASSERT(addr != MAP_FAILED, "Dirty ring map failed");
+	addr = __kvm_mmap(size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd,
+			  page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
 
 	vcpu->dirty_gfns = addr;
 	vcpu->dirty_gfns_count = size / sizeof(struct kvm_dirty_gfn);

tools/testing/selftests/kvm/mmu_stress_test.c

Lines changed: 2 additions & 3 deletions
@@ -339,8 +339,7 @@ int main(int argc, char *argv[])
 	TEST_ASSERT(max_gpa > (4 * slot_size), "MAXPHYADDR <4gb ");
 
 	fd = kvm_memfd_alloc(slot_size, hugepages);
-	mem = mmap(NULL, slot_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-	TEST_ASSERT(mem != MAP_FAILED, "mmap() failed");
+	mem = kvm_mmap(slot_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);
 
 	TEST_ASSERT(!madvise(mem, slot_size, MADV_NOHUGEPAGE), "madvise() failed");
 
@@ -413,7 +412,7 @@ int main(int argc, char *argv[])
 	for (slot = (slot - 1) & ~1ull; slot >= first_slot; slot -= 2)
 		vm_set_user_memory_region(vm, slot, 0, 0, 0, NULL);
 
-	munmap(mem, slot_size / 2);
+	kvm_munmap(mem, slot_size / 2);
 
 	/* Sanity check that the vCPUs actually ran. */
 	for (i = 0; i < nr_vcpus; i++)

tools/testing/selftests/kvm/s390/ucontrol_test.c

Lines changed: 7 additions & 9 deletions
@@ -142,19 +142,17 @@ FIXTURE_SETUP(uc_kvm)
 	self->kvm_run_size = ioctl(self->kvm_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
 	ASSERT_GE(self->kvm_run_size, sizeof(struct kvm_run))
 		  TH_LOG(KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, self->kvm_run_size));
-	self->run = (struct kvm_run *)mmap(NULL, self->kvm_run_size,
-		PROT_READ | PROT_WRITE, MAP_SHARED, self->vcpu_fd, 0);
-	ASSERT_NE(self->run, MAP_FAILED);
+	self->run = kvm_mmap(self->kvm_run_size, PROT_READ | PROT_WRITE,
+			     MAP_SHARED, self->vcpu_fd);
 	/**
 	 * For virtual cpus that have been created with S390 user controlled
 	 * virtual machines, the resulting vcpu fd can be memory mapped at page
 	 * offset KVM_S390_SIE_PAGE_OFFSET in order to obtain a memory map of
 	 * the virtual cpu's hardware control block.
 	 */
-	self->sie_block = (struct kvm_s390_sie_block *)mmap(NULL, PAGE_SIZE,
-			PROT_READ | PROT_WRITE, MAP_SHARED,
-			self->vcpu_fd, KVM_S390_SIE_PAGE_OFFSET << PAGE_SHIFT);
-	ASSERT_NE(self->sie_block, MAP_FAILED);
+	self->sie_block = __kvm_mmap(PAGE_SIZE, PROT_READ | PROT_WRITE,
+				     MAP_SHARED, self->vcpu_fd,
+				     KVM_S390_SIE_PAGE_OFFSET << PAGE_SHIFT);
 
 	TH_LOG("VM created %p %p", self->run, self->sie_block);
 
@@ -186,8 +184,8 @@ FIXTURE_SETUP(uc_kvm)
 
 FIXTURE_TEARDOWN(uc_kvm)
 {
-	munmap(self->sie_block, PAGE_SIZE);
-	munmap(self->run, self->kvm_run_size);
+	kvm_munmap(self->sie_block, PAGE_SIZE);
+	kvm_munmap(self->run, self->kvm_run_size);
 	close(self->vcpu_fd);
 	close(self->vm_fd);
 	close(self->kvm_fd);

tools/testing/selftests/kvm/set_memory_region_test.c

Lines changed: 8 additions & 9 deletions
@@ -433,10 +433,10 @@ static void test_add_max_memory_regions(void)
 	pr_info("Adding slots 0..%i, each memory region with %dK size\n",
 		(max_mem_slots - 1), MEM_REGION_SIZE >> 10);
 
-	mem = mmap(NULL, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment,
-		   PROT_READ | PROT_WRITE,
-		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
-	TEST_ASSERT(mem != MAP_FAILED, "Failed to mmap() host");
+
+	mem = kvm_mmap((size_t)max_mem_slots * MEM_REGION_SIZE + alignment,
+		       PROT_READ | PROT_WRITE,
+		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1);
 	mem_aligned = (void *)(((size_t) mem + alignment - 1) & ~(alignment - 1));
 
 	for (slot = 0; slot < max_mem_slots; slot++)
@@ -446,18 +446,17 @@
 			       mem_aligned + (uint64_t)slot * MEM_REGION_SIZE);
 
 	/* Check it cannot be added memory slots beyond the limit */
-	mem_extra = mmap(NULL, MEM_REGION_SIZE, PROT_READ | PROT_WRITE,
-			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-	TEST_ASSERT(mem_extra != MAP_FAILED, "Failed to mmap() host");
+	mem_extra = kvm_mmap(MEM_REGION_SIZE, PROT_READ | PROT_WRITE,
+			     MAP_PRIVATE | MAP_ANONYMOUS, -1);
 
 	ret = __vm_set_user_memory_region(vm, max_mem_slots, 0,
 					  (uint64_t)max_mem_slots * MEM_REGION_SIZE,
 					  MEM_REGION_SIZE, mem_extra);
 	TEST_ASSERT(ret == -1 && errno == EINVAL,
 		    "Adding one more memory slot should fail with EINVAL");
 
-	munmap(mem, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment);
-	munmap(mem_extra, MEM_REGION_SIZE);
+	kvm_munmap(mem, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment);
+	kvm_munmap(mem_extra, MEM_REGION_SIZE);
 	kvm_vm_free(vm);
 }
 
