Skip to content

Commit bb05b0e

Browse files
committed
Merge tag 'kvm-x86-selftests-6.5' of https://github.com/kvm-x86/linux into HEAD
KVM selftests changes for 6.5:

 - Add a test for splitting and reconstituting hugepages during and after dirty logging

 - Add support for CPU pinning in demand paging test

 - Generate dependency files so that partial rebuilds work as expected

 - Misc cleanups and fixes
2 parents 751d77f + 5ed1952 commit bb05b0e

11 files changed

Lines changed: 416 additions & 118 deletions

tools/testing/selftests/kvm/Makefile

Lines changed: 16 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,7 @@ TEST_PROGS_x86_64 += x86_64/nx_huge_pages_test.sh
6161
# Compiled test targets
6262
TEST_GEN_PROGS_x86_64 = x86_64/cpuid_test
6363
TEST_GEN_PROGS_x86_64 += x86_64/cr4_cpuid_sync_test
64+
TEST_GEN_PROGS_x86_64 += x86_64/dirty_log_page_splitting_test
6465
TEST_GEN_PROGS_x86_64 += x86_64/get_msr_index_features
6566
TEST_GEN_PROGS_x86_64 += x86_64/exit_on_emulation_failure_test
6667
TEST_GEN_PROGS_x86_64 += x86_64/fix_hypercall_test
@@ -185,6 +186,8 @@ TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(ARCH_DIR))
185186
TEST_GEN_PROGS_EXTENDED += $(TEST_GEN_PROGS_EXTENDED_$(ARCH_DIR))
186187
LIBKVM += $(LIBKVM_$(ARCH_DIR))
187188

189+
OVERRIDE_TARGETS = 1
190+
188191
# lib.mak defines $(OUTPUT), prepends $(OUTPUT)/ to $(TEST_GEN_PROGS), and most
189192
# importantly defines, i.e. overwrites, $(CC) (unless `make -e` or `make CC=`,
190193
# which causes the environment variable to override the makefile).
@@ -199,7 +202,7 @@ else
199202
LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include
200203
endif
201204
CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
202-
-Wno-gnu-variable-sized-type-not-at-end \
205+
-Wno-gnu-variable-sized-type-not-at-end -MD\
203206
-fno-builtin-memcmp -fno-builtin-memcpy -fno-builtin-memset \
204207
-fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
205208
-I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \
@@ -226,7 +229,18 @@ LIBKVM_S_OBJ := $(patsubst %.S, $(OUTPUT)/%.o, $(LIBKVM_S))
226229
LIBKVM_STRING_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_STRING))
227230
LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ) $(LIBKVM_STRING_OBJ)
228231

229-
EXTRA_CLEAN += $(LIBKVM_OBJS) cscope.*
232+
TEST_GEN_OBJ = $(patsubst %, %.o, $(TEST_GEN_PROGS))
233+
TEST_GEN_OBJ += $(patsubst %, %.o, $(TEST_GEN_PROGS_EXTENDED))
234+
TEST_DEP_FILES = $(patsubst %.o, %.d, $(TEST_GEN_OBJ))
235+
TEST_DEP_FILES += $(patsubst %.o, %.d, $(LIBKVM_OBJS))
236+
-include $(TEST_DEP_FILES)
237+
238+
$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): %: %.o
239+
$(CC) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) $(TARGET_ARCH) $< $(LIBKVM_OBJS) $(LDLIBS) -o $@
240+
$(TEST_GEN_OBJ): $(OUTPUT)/%.o: %.c
241+
$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
242+
243+
EXTRA_CLEAN += $(LIBKVM_OBJS) $(TEST_DEP_FILES) $(TEST_GEN_OBJ) cscope.*
230244

231245
x := $(shell mkdir -p $(sort $(dir $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ))))
232246
$(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c

tools/testing/selftests/kvm/demand_paging_test.c

Lines changed: 22 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -128,6 +128,7 @@ static void prefault_mem(void *alias, uint64_t len)
128128

129129
static void run_test(enum vm_guest_mode mode, void *arg)
130130
{
131+
struct memstress_vcpu_args *vcpu_args;
131132
struct test_params *p = arg;
132133
struct uffd_desc **uffd_descs = NULL;
133134
struct timespec start;
@@ -145,24 +146,24 @@ static void run_test(enum vm_guest_mode mode, void *arg)
145146
"Failed to allocate buffer for guest data pattern");
146147
memset(guest_data_prototype, 0xAB, demand_paging_size);
147148

149+
if (p->uffd_mode == UFFDIO_REGISTER_MODE_MINOR) {
150+
for (i = 0; i < nr_vcpus; i++) {
151+
vcpu_args = &memstress_args.vcpu_args[i];
152+
prefault_mem(addr_gpa2alias(vm, vcpu_args->gpa),
153+
vcpu_args->pages * memstress_args.guest_page_size);
154+
}
155+
}
156+
148157
if (p->uffd_mode) {
149158
uffd_descs = malloc(nr_vcpus * sizeof(struct uffd_desc *));
150159
TEST_ASSERT(uffd_descs, "Memory allocation failed");
151-
152160
for (i = 0; i < nr_vcpus; i++) {
153-
struct memstress_vcpu_args *vcpu_args;
154161
void *vcpu_hva;
155-
void *vcpu_alias;
156162

157163
vcpu_args = &memstress_args.vcpu_args[i];
158164

159165
/* Cache the host addresses of the region */
160166
vcpu_hva = addr_gpa2hva(vm, vcpu_args->gpa);
161-
vcpu_alias = addr_gpa2alias(vm, vcpu_args->gpa);
162-
163-
prefault_mem(vcpu_alias,
164-
vcpu_args->pages * memstress_args.guest_page_size);
165-
166167
/*
167168
* Set up user fault fd to handle demand paging
168169
* requests.
@@ -207,10 +208,11 @@ static void help(char *name)
207208
{
208209
puts("");
209210
printf("usage: %s [-h] [-m vm_mode] [-u uffd_mode] [-d uffd_delay_usec]\n"
210-
" [-b memory] [-s type] [-v vcpus] [-o]\n", name);
211+
" [-b memory] [-s type] [-v vcpus] [-c cpu_list] [-o]\n", name);
211212
guest_modes_help();
212213
printf(" -u: use userfaultfd to handle vCPU page faults. Mode is a\n"
213214
" UFFD registration mode: 'MISSING' or 'MINOR'.\n");
215+
kvm_print_vcpu_pinning_help();
214216
printf(" -d: add a delay in usec to the User Fault\n"
215217
" FD handler to simulate demand paging\n"
216218
" overheads. Ignored without -u.\n");
@@ -228,6 +230,7 @@ static void help(char *name)
228230
int main(int argc, char *argv[])
229231
{
230232
int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
233+
const char *cpulist = NULL;
231234
struct test_params p = {
232235
.src_type = DEFAULT_VM_MEM_SRC,
233236
.partition_vcpu_memory_access = true,
@@ -236,7 +239,7 @@ int main(int argc, char *argv[])
236239

237240
guest_modes_append_default();
238241

239-
while ((opt = getopt(argc, argv, "hm:u:d:b:s:v:o")) != -1) {
242+
while ((opt = getopt(argc, argv, "hm:u:d:b:s:v:c:o")) != -1) {
240243
switch (opt) {
241244
case 'm':
242245
guest_modes_cmdline(optarg);
@@ -263,6 +266,9 @@ int main(int argc, char *argv[])
263266
TEST_ASSERT(nr_vcpus <= max_vcpus,
264267
"Invalid number of vcpus, must be between 1 and %d", max_vcpus);
265268
break;
269+
case 'c':
270+
cpulist = optarg;
271+
break;
266272
case 'o':
267273
p.partition_vcpu_memory_access = false;
268274
break;
@@ -278,6 +284,12 @@ int main(int argc, char *argv[])
278284
TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -s");
279285
}
280286

287+
if (cpulist) {
288+
kvm_parse_vcpu_pinning(cpulist, memstress_args.vcpu_to_pcpu,
289+
nr_vcpus);
290+
memstress_args.pin_vcpus = true;
291+
}
292+
281293
for_each_guest_mode(run_test, &p);
282294

283295
return 0;

tools/testing/selftests/kvm/dirty_log_perf_test.c

Lines changed: 8 additions & 88 deletions
Original file line numberDiff line numberDiff line change
@@ -136,77 +136,6 @@ struct test_params {
136136
bool random_access;
137137
};
138138

139-
static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
140-
{
141-
int i;
142-
143-
for (i = 0; i < slots; i++) {
144-
int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
145-
int flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0;
146-
147-
vm_mem_region_set_flags(vm, slot, flags);
148-
}
149-
}
150-
151-
/* Turn dirty logging on for all memstress memory slots. */
static inline void enable_dirty_logging(struct kvm_vm *vm, int slots)
152-
{
153-
toggle_dirty_logging(vm, slots, true);
154-
}
155-
156-
/* Turn dirty logging off for all memstress memory slots. */
static inline void disable_dirty_logging(struct kvm_vm *vm, int slots)
157-
{
158-
toggle_dirty_logging(vm, slots, false);
159-
}
160-
161-
static void get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots)
162-
{
163-
int i;
164-
165-
for (i = 0; i < slots; i++) {
166-
int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
167-
168-
kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
169-
}
170-
}
171-
172-
static void clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
173-
int slots, uint64_t pages_per_slot)
174-
{
175-
int i;
176-
177-
for (i = 0; i < slots; i++) {
178-
int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
179-
180-
kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
181-
}
182-
}
183-
184-
/*
 * Allocate one dirty bitmap per memory slot, each sized to track
 * @pages_per_slot pages.  Asserts on allocation failure, so on return the
 * array is fully populated.  The caller releases it with free_bitmaps().
 */
static unsigned long **alloc_bitmaps(int slots, uint64_t pages_per_slot)
{
	unsigned long **bitmaps = malloc(slots * sizeof(bitmaps[0]));
	int i;

	TEST_ASSERT(bitmaps, "Failed to allocate bitmaps array.");

	for (i = 0; i < slots; i++) {
		bitmaps[i] = bitmap_zalloc(pages_per_slot);
		TEST_ASSERT(bitmaps[i], "Failed to allocate slot bitmap.");
	}

	return bitmaps;
}
199-
200-
/* Free each per-slot bitmap from alloc_bitmaps(), then the array itself. */
static void free_bitmaps(unsigned long *bitmaps[], int slots)
{
	while (slots-- > 0)
		free(bitmaps[slots]);

	free(bitmaps);
}
209-
210139
static void run_test(enum vm_guest_mode mode, void *arg)
211140
{
212141
struct test_params *p = arg;
@@ -236,7 +165,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
236165
host_num_pages = vm_num_host_pages(mode, guest_num_pages);
237166
pages_per_slot = host_num_pages / p->slots;
238167

239-
bitmaps = alloc_bitmaps(p->slots, pages_per_slot);
168+
bitmaps = memstress_alloc_bitmaps(p->slots, pages_per_slot);
240169

241170
if (dirty_log_manual_caps)
242171
vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
@@ -277,7 +206,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
277206

278207
/* Enable dirty logging */
279208
clock_gettime(CLOCK_MONOTONIC, &start);
280-
enable_dirty_logging(vm, p->slots);
209+
memstress_enable_dirty_logging(vm, p->slots);
281210
ts_diff = timespec_elapsed(start);
282211
pr_info("Enabling dirty logging time: %ld.%.9lds\n\n",
283212
ts_diff.tv_sec, ts_diff.tv_nsec);
@@ -306,7 +235,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
306235
iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
307236

308237
clock_gettime(CLOCK_MONOTONIC, &start);
309-
get_dirty_log(vm, bitmaps, p->slots);
238+
memstress_get_dirty_log(vm, bitmaps, p->slots);
310239
ts_diff = timespec_elapsed(start);
311240
get_dirty_log_total = timespec_add(get_dirty_log_total,
312241
ts_diff);
@@ -315,7 +244,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
315244

316245
if (dirty_log_manual_caps) {
317246
clock_gettime(CLOCK_MONOTONIC, &start);
318-
clear_dirty_log(vm, bitmaps, p->slots, pages_per_slot);
247+
memstress_clear_dirty_log(vm, bitmaps, p->slots,
248+
pages_per_slot);
319249
ts_diff = timespec_elapsed(start);
320250
clear_dirty_log_total = timespec_add(clear_dirty_log_total,
321251
ts_diff);
@@ -334,7 +264,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
334264

335265
/* Disable dirty logging */
336266
clock_gettime(CLOCK_MONOTONIC, &start);
337-
disable_dirty_logging(vm, p->slots);
267+
memstress_disable_dirty_logging(vm, p->slots);
338268
ts_diff = timespec_elapsed(start);
339269
pr_info("Disabling dirty logging time: %ld.%.9lds\n",
340270
ts_diff.tv_sec, ts_diff.tv_nsec);
@@ -359,7 +289,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
359289
clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
360290
}
361291

362-
free_bitmaps(bitmaps, p->slots);
292+
memstress_free_bitmaps(bitmaps, p->slots);
363293
arch_cleanup_vm(vm);
364294
memstress_destroy_vm(vm);
365295
}
@@ -402,17 +332,7 @@ static void help(char *name)
402332
" so -w X means each page has an X%% chance of writing\n"
403333
" and a (100-X)%% chance of reading.\n"
404334
" (default: 100 i.e. all pages are written to.)\n");
405-
printf(" -c: Pin tasks to physical CPUs. Takes a list of comma separated\n"
406-
" values (target pCPU), one for each vCPU, plus an optional\n"
407-
" entry for the main application task (specified via entry\n"
408-
" <nr_vcpus + 1>). If used, entries must be provided for all\n"
409-
" vCPUs, i.e. pinning vCPUs is all or nothing.\n\n"
410-
" E.g. to create 3 vCPUs, pin vCPU0=>pCPU22, vCPU1=>pCPU23,\n"
411-
" vCPU2=>pCPU24, and pin the application task to pCPU50:\n\n"
412-
" ./dirty_log_perf_test -v 3 -c 22,23,24,50\n\n"
413-
" To leave the application task unpinned, drop the final entry:\n\n"
414-
" ./dirty_log_perf_test -v 3 -c 22,23,24\n\n"
415-
" (default: no pinning)\n");
335+
kvm_print_vcpu_pinning_help();
416336
puts("");
417337
exit(0);
418338
}

tools/testing/selftests/kvm/include/kvm_util_base.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -733,6 +733,7 @@ static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
733733
struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
734734

735735
void kvm_pin_this_task_to_pcpu(uint32_t pcpu);
736+
void kvm_print_vcpu_pinning_help(void);
736737
void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
737738
int nr_vcpus);
738739

tools/testing/selftests/kvm/include/memstress.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -72,4 +72,12 @@ void memstress_guest_code(uint32_t vcpu_id);
7272
uint64_t memstress_nested_pages(int nr_vcpus);
7373
void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]);
7474

75+
void memstress_enable_dirty_logging(struct kvm_vm *vm, int slots);
76+
void memstress_disable_dirty_logging(struct kvm_vm *vm, int slots);
77+
void memstress_get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots);
78+
void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
79+
int slots, uint64_t pages_per_slot);
80+
unsigned long **memstress_alloc_bitmaps(int slots, uint64_t pages_per_slot);
81+
void memstress_free_bitmaps(unsigned long *bitmaps[], int slots);
82+
7583
#endif /* SELFTEST_KVM_MEMSTRESS_H */

tools/testing/selftests/kvm/lib/kvm_util.c

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -494,6 +494,23 @@ static uint32_t parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask)
494494
return pcpu;
495495
}
496496

497+
void kvm_print_vcpu_pinning_help(void)
498+
{
499+
const char *name = program_invocation_name;
500+
501+
printf(" -c: Pin tasks to physical CPUs. Takes a list of comma separated\n"
502+
" values (target pCPU), one for each vCPU, plus an optional\n"
503+
" entry for the main application task (specified via entry\n"
504+
" <nr_vcpus + 1>). If used, entries must be provided for all\n"
505+
" vCPUs, i.e. pinning vCPUs is all or nothing.\n\n"
506+
" E.g. to create 3 vCPUs, pin vCPU0=>pCPU22, vCPU1=>pCPU23,\n"
507+
" vCPU2=>pCPU24, and pin the application task to pCPU50:\n\n"
508+
" %s -v 3 -c 22,23,24,50\n\n"
509+
" To leave the application task unpinned, drop the final entry:\n\n"
510+
" %s -v 3 -c 22,23,24\n\n"
511+
" (default: no pinning)\n", name, name);
512+
}
513+
497514
void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
498515
int nr_vcpus)
499516
{

0 commit comments

Comments
 (0)