// SPDX-License-Identifier: GPL-2.0-only
/*
 * Test for s390x CMMA migration
 *
 * Copyright IBM Corp. 2023
 *
 * Authors:
 *  Nico Boehr <nrb@linux.ibm.com>
 */

#define _GNU_SOURCE /* for program_invocation_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"
#include "kvm_util.h"
#include "kselftest.h"

#define MAIN_PAGE_COUNT 512

#define TEST_DATA_PAGE_COUNT 512
#define TEST_DATA_MEMSLOT 1
#define TEST_DATA_START_GFN 4096

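/* A second data memslot, used only by test_migration_mode(). */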
#define TEST_DATA_TWO_PAGE_COUNT 256
#define TEST_DATA_TWO_MEMSLOT 2
#define TEST_DATA_TWO_START_GFN 8192

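/*
 * Buffer for KVM_S390_GET_CMMA_BITS, which stores one CMMA state byte per
 * guest page; it therefore must be large enough for every page in the MAIN
 * and TEST_DATA memslots combined.
 */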
static char cmma_value_buf[MAIN_PAGE_COUNT + TEST_DATA_PAGE_COUNT];

/**
 * Dirty CMMA attributes of exactly one page in the TEST_DATA memslot,
 * so use_cmma goes on and the CMMA-related ioctls do something.
 */
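/*
 * A best-effort decode of the hand-assembled instructions below (an
 * interpretation based on the s390 instruction formats, not part of the
 * original comments): 0xb9ab is the ESSA opcode in RRF format, with the
 * operands being the state result register, the page address register and
 * the order code, where order 1 means set-stable. "diag 0,0,0x501" is the
 * diagnose code the selftest ucall implementation uses as a hypercall, so
 * KVM intercepts it and exits to userspace.
 */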
static void guest_do_one_essa(void)
{
	asm volatile(
		/* load TEST_DATA_START_GFN into r1 */
		"	llilf 1,%[start_gfn]\n"
		/* calculate the address from the gfn */
		"	sllg 1,1,12(0)\n"
		/* set the first page in TEST_DATA memslot to STABLE */
		"	.insn rrf,0xb9ab0000,2,1,1,0\n"
		/* hypercall */
		"	diag 0,0,0x501\n"
		"0:	j 0b"
		:
		: [start_gfn] "L"(TEST_DATA_START_GFN)
		: "r1", "r2", "memory", "cc"
	);
}

/**
 * Touch CMMA attributes of all pages in TEST_DATA memslot. Set them to stable
 * state.
 */
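/*
 * A rough C sketch of the loop below, with a hypothetical essa() helper
 * standing in for the hand-assembled instruction:
 *
 *	for (gfn = TEST_DATA_START_GFN;
 *	     gfn < TEST_DATA_START_GFN + TEST_DATA_PAGE_COUNT; gfn++)
 *		essa(ESSA_SET_STABLE, gfn << 12);
 *	hypercall();
 */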
static void guest_dirty_test_data(void)
{
	asm volatile(
		/* r1 = TEST_DATA_START_GFN */
		"	xgr 1,1\n"
		"	llilf 1,%[start_gfn]\n"
		/* r5 = TEST_DATA_PAGE_COUNT */
		"	lghi 5,%[page_count]\n"
		/* r5 += r1 */
		"2:	agfr 5,1\n"
		/* r2 = r1 << 12 */
		"1:	sllg 2,1,12(0)\n"
		/* essa(r4, r2, SET_STABLE) */
		"	.insn rrf,0xb9ab0000,4,2,1,0\n"
		/* i++ */
		"	agfi 1,1\n"
		/* if r1 < r5 goto 1 */
		"	cgrjl 1,5,1b\n"
		/* hypercall */
		"	diag 0,0,0x501\n"
		"0:	j 0b"
		:
		: [start_gfn] "L"(TEST_DATA_START_GFN),
		  [page_count] "L"(TEST_DATA_PAGE_COUNT)
		:
			/* the counter in our loop over the pages */
			"r1",
			/* the calculated page physical address */
			"r2",
			/* ESSA output register */
			"r4",
			/* last page */
			"r5",
			"cc", "memory"
	);
}

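/*
 * ____vm_create() gives us a barebones VM with no memslots, so the tests
 * below can lay out guest memory themselves (and test_migration_mode() can
 * exercise a VM that has no memory at all).
 */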
static struct kvm_vm *create_vm(void)
{
	return ____vm_create(VM_MODE_DEFAULT);
}

static void create_main_memslot(struct kvm_vm *vm)
{
	int i;

	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, MAIN_PAGE_COUNT, 0);
	/* set the array of memslots to zero like __vm_create does */
	for (i = 0; i < NR_MEM_REGIONS; i++)
		vm->memslots[i] = 0;
}

static void create_test_memslot(struct kvm_vm *vm)
{
	vm_userspace_mem_region_add(vm,
				    VM_MEM_SRC_ANONYMOUS,
				    TEST_DATA_START_GFN << vm->page_shift,
				    TEST_DATA_MEMSLOT,
				    TEST_DATA_PAGE_COUNT,
				    0
				   );
	vm->memslots[MEM_REGION_TEST_DATA] = TEST_DATA_MEMSLOT;
}

static void create_memslots(struct kvm_vm *vm)
{
	/*
	 * Our VM has the following memory layout:
	 * +------+---------------------------+
	 * | GFN  | Memslot                   |
	 * +------+---------------------------+
	 * | 0    |                           |
	 * | ...  | MAIN (Code, Stack, ...)   |
	 * | 511  |                           |
	 * +------+---------------------------+
	 * | 4096 |                           |
	 * | ...  | TEST_DATA                 |
	 * | 4607 |                           |
	 * +------+---------------------------+
	 */
	create_main_memslot(vm);
	create_test_memslot(vm);
}

static void finish_vm_setup(struct kvm_vm *vm)
{
	struct userspace_mem_region *slot0;

	kvm_vm_elf_load(vm, program_invocation_name);

	slot0 = memslot2region(vm, 0);
	ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size);

	kvm_arch_vm_post_create(vm);
}

static struct kvm_vm *create_vm_two_memslots(void)
{
	struct kvm_vm *vm;

	vm = create_vm();

	create_memslots(vm);

	finish_vm_setup(vm);

	return vm;
}

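/*
 * Note that every test calls this before adding its vCPU; KVM rejects
 * KVM_S390_VM_MEM_ENABLE_CMMA once vCPUs have been created (an assumption
 * about the ordering requirement, consistent with how the tests below are
 * structured).
 */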
static void enable_cmma(struct kvm_vm *vm)
{
	int r;

	r = __kvm_device_attr_set(vm->fd, KVM_S390_VM_MEM_CTRL, KVM_S390_VM_MEM_ENABLE_CMMA, NULL);
	TEST_ASSERT(!r, "enabling cmma failed r=%d errno=%d", r, errno);
}

static void enable_dirty_tracking(struct kvm_vm *vm)
{
	vm_mem_region_set_flags(vm, 0, KVM_MEM_LOG_DIRTY_PAGES);
	vm_mem_region_set_flags(vm, TEST_DATA_MEMSLOT, KVM_MEM_LOG_DIRTY_PAGES);
}

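/*
 * Starting migration mode marks all CMMA states dirty and requires dirty
 * page logging on every memslot; otherwise KVM fails the attribute with
 * EINVAL, which test_migration_mode() below relies on.
 */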
static int __enable_migration_mode(struct kvm_vm *vm)
{
	return __kvm_device_attr_set(vm->fd,
				     KVM_S390_VM_MIGRATION,
				     KVM_S390_VM_MIGRATION_START,
				     NULL
				    );
}

static void enable_migration_mode(struct kvm_vm *vm)
{
	int r = __enable_migration_mode(vm);

	TEST_ASSERT(!r, "enabling migration mode failed r=%d errno=%d", r, errno);
}

static bool is_migration_mode_on(struct kvm_vm *vm)
{
	u64 out;
	int r;

	r = __kvm_device_attr_get(vm->fd,
				  KVM_S390_VM_MIGRATION,
				  KVM_S390_VM_MIGRATION_STATUS,
				  &out
				 );
	TEST_ASSERT(!r, "getting migration mode status failed r=%d errno=%d", r, errno);
	return out;
}

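/*
 * Wrapper around KVM_S390_GET_CMMA_BITS. On input, count is the capacity of
 * the values buffer; on success the kernel updates start_gfn to the first
 * GFN it actually reported, count to the number of CMMA values written and
 * remaining to the number of dirty values left over — the behavior the
 * assertions in the tests below depend on.
 */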
static int vm_get_cmma_bits(struct kvm_vm *vm, u64 flags, int *errno_out)
{
	struct kvm_s390_cmma_log args;
	int rc;

	errno = 0;

	args = (struct kvm_s390_cmma_log){
		.start_gfn = 0,
		.count = sizeof(cmma_value_buf),
		.flags = flags,
		.values = (__u64)&cmma_value_buf[0]
	};
	rc = __vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);

	*errno_out = errno;
	return rc;
}

static void test_get_cmma_basic(void)
{
	struct kvm_vm *vm = create_vm_two_memslots();
	struct kvm_vcpu *vcpu;
	int rc, errno_out;

	/* GET_CMMA_BITS without CMMA enabled should fail */
	rc = vm_get_cmma_bits(vm, 0, &errno_out);
	TEST_ASSERT_EQ(rc, -1);
	TEST_ASSERT_EQ(errno_out, ENXIO);

	enable_cmma(vm);
	vcpu = vm_vcpu_add(vm, 1, guest_do_one_essa);

	vcpu_run(vcpu);

	/* GET_CMMA_BITS without migration mode and without peeking should fail */
	rc = vm_get_cmma_bits(vm, 0, &errno_out);
	TEST_ASSERT_EQ(rc, -1);
	TEST_ASSERT_EQ(errno_out, EINVAL);

	/* GET_CMMA_BITS without migration mode and with peeking should work */
	rc = vm_get_cmma_bits(vm, KVM_S390_CMMA_PEEK, &errno_out);
	TEST_ASSERT_EQ(rc, 0);
	TEST_ASSERT_EQ(errno_out, 0);

	enable_dirty_tracking(vm);
	enable_migration_mode(vm);

	/* GET_CMMA_BITS with invalid flags */
	rc = vm_get_cmma_bits(vm, 0xfeedc0fe, &errno_out);
	TEST_ASSERT_EQ(rc, -1);
	TEST_ASSERT_EQ(errno_out, EINVAL);

	kvm_vm_free(vm);
}

static void assert_exit_was_hypercall(struct kvm_vcpu *vcpu)
{
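	/*
	 * An interpretation of the magic values below (not taken from the
	 * original source): exit reason 13 is KVM_EXIT_S390_SIEIC, icptcode
	 * 4 is an instruction interception, ipa 0x8300 is the first halfword
	 * of "diag 0,0,0x501" (opcode 0x83) and ipb 0x5010000 carries the
	 * 0x501 diagnose code in its displacement field.
	 */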
	TEST_ASSERT_EQ(vcpu->run->exit_reason, 13);
	TEST_ASSERT_EQ(vcpu->run->s390_sieic.icptcode, 4);
	TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipa, 0x8300);
	TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipb, 0x5010000);
}

static void test_migration_mode(void)
{
	struct kvm_vm *vm = create_vm();
	struct kvm_vcpu *vcpu;
	u64 orig_psw;
	int rc;

	/* enabling migration mode on a VM without memory should fail */
	rc = __enable_migration_mode(vm);
	TEST_ASSERT_EQ(rc, -1);
	TEST_ASSERT_EQ(errno, EINVAL);
	TEST_ASSERT(!is_migration_mode_on(vm), "migration mode should still be off");
	errno = 0;

	create_memslots(vm);
	finish_vm_setup(vm);

	enable_cmma(vm);
	vcpu = vm_vcpu_add(vm, 1, guest_do_one_essa);
	orig_psw = vcpu->run->psw_addr;

	/*
	 * Execute one essa instruction in the guest. Otherwise the guest will
	 * not have use_cmm enabled and GET_CMMA_BITS will return no pages.
	 */
	vcpu_run(vcpu);
	assert_exit_was_hypercall(vcpu);

	/* migration mode when memslots have dirty tracking off should fail */
	rc = __enable_migration_mode(vm);
	TEST_ASSERT_EQ(rc, -1);
	TEST_ASSERT_EQ(errno, EINVAL);
	TEST_ASSERT(!is_migration_mode_on(vm), "migration mode should still be off");
	errno = 0;

	/* enable dirty tracking */
	enable_dirty_tracking(vm);

	/* enabling migration mode should work now */
	rc = __enable_migration_mode(vm);
	TEST_ASSERT_EQ(rc, 0);
	TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
	errno = 0;

	/* execute another ESSA instruction to check that this still goes fine */
	vcpu->run->psw_addr = orig_psw;
	vcpu_run(vcpu);
	assert_exit_was_hypercall(vcpu);

	/*
	 * With migration mode on, create a new memslot with dirty tracking off.
	 * This should turn off migration mode.
	 */
	TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
	vm_userspace_mem_region_add(vm,
				    VM_MEM_SRC_ANONYMOUS,
				    TEST_DATA_TWO_START_GFN << vm->page_shift,
				    TEST_DATA_TWO_MEMSLOT,
				    TEST_DATA_TWO_PAGE_COUNT,
				    0
				   );
	TEST_ASSERT(!is_migration_mode_on(vm),
		    "creating memslot without dirty tracking turns off migration mode"
		   );

	/* ESSA instructions should still execute fine */
	vcpu->run->psw_addr = orig_psw;
	vcpu_run(vcpu);
	assert_exit_was_hypercall(vcpu);

	/*
	 * Turn on dirty tracking on the new memslot.
	 * It should be possible to turn migration mode back on again.
	 */
	vm_mem_region_set_flags(vm, TEST_DATA_TWO_MEMSLOT, KVM_MEM_LOG_DIRTY_PAGES);
	rc = __enable_migration_mode(vm);
	TEST_ASSERT_EQ(rc, 0);
	TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
	errno = 0;

	/*
	 * Turn off dirty tracking again, this time with just a flag change.
	 * Again, migration mode should turn off.
	 */
	TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
	vm_mem_region_set_flags(vm, TEST_DATA_TWO_MEMSLOT, 0);
	TEST_ASSERT(!is_migration_mode_on(vm),
		    "disabling dirty tracking should turn off migration mode"
		   );

	/* ESSA instructions should still execute fine */
	vcpu->run->psw_addr = orig_psw;
	vcpu_run(vcpu);
	assert_exit_was_hypercall(vcpu);

	kvm_vm_free(vm);
}

/**
 * Given a VM with the MAIN and TEST_DATA memslots, assert that the CMMA
 * attributes of all pages in both memslots are dirty, and that nothing else
 * is. Because the queries are done without KVM_S390_CMMA_PEEK, this has the
 * useful side effect of ensuring nothing is CMMA dirty after this function.
 */
static void assert_all_slots_cmma_dirty(struct kvm_vm *vm)
{
	struct kvm_s390_cmma_log args;

	/*
	 * First iteration - everything should be dirty.
	 * Start at the main memslot...
	 */
	args = (struct kvm_s390_cmma_log){
		.start_gfn = 0,
		.count = sizeof(cmma_value_buf),
		.flags = 0,
		.values = (__u64)&cmma_value_buf[0]
	};
	memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
	vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
	TEST_ASSERT_EQ(args.count, MAIN_PAGE_COUNT);
	TEST_ASSERT_EQ(args.remaining, TEST_DATA_PAGE_COUNT);
	TEST_ASSERT_EQ(args.start_gfn, 0);

	/* ...and then - after a hole - the TEST_DATA memslot should follow */
	args = (struct kvm_s390_cmma_log){
		.start_gfn = MAIN_PAGE_COUNT,
		.count = sizeof(cmma_value_buf),
		.flags = 0,
		.values = (__u64)&cmma_value_buf[0]
	};
	memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
	vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
	TEST_ASSERT_EQ(args.count, TEST_DATA_PAGE_COUNT);
	TEST_ASSERT_EQ(args.start_gfn, TEST_DATA_START_GFN);
	TEST_ASSERT_EQ(args.remaining, 0);

	/* ...and nothing else should be there */
	args = (struct kvm_s390_cmma_log){
		.start_gfn = TEST_DATA_START_GFN + TEST_DATA_PAGE_COUNT,
		.count = sizeof(cmma_value_buf),
		.flags = 0,
		.values = (__u64)&cmma_value_buf[0]
	};
	memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
	vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
	TEST_ASSERT_EQ(args.count, 0);
	TEST_ASSERT_EQ(args.start_gfn, 0);
	TEST_ASSERT_EQ(args.remaining, 0);
}

/**
 * Given a VM, assert no pages are CMMA dirty.
 */
static void assert_no_pages_cmma_dirty(struct kvm_vm *vm)
{
	struct kvm_s390_cmma_log args;

	/* If we start from GFN 0 again, nothing should be dirty. */
	args = (struct kvm_s390_cmma_log){
		.start_gfn = 0,
		.count = sizeof(cmma_value_buf),
		.flags = 0,
		.values = (__u64)&cmma_value_buf[0]
	};
	memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
	vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
	if (args.count || args.remaining || args.start_gfn)
		TEST_FAIL("pages are still dirty start_gfn=0x%llx count=%u remaining=%llu",
			  args.start_gfn,
			  args.count,
			  args.remaining
			 );
}

static void test_get_initial_dirty(void)
{
	struct kvm_vm *vm = create_vm_two_memslots();
	struct kvm_vcpu *vcpu;

	enable_cmma(vm);
	vcpu = vm_vcpu_add(vm, 1, guest_do_one_essa);

	/*
	 * Execute one essa instruction in the guest. Otherwise the guest will
	 * not have use_cmm enabled and GET_CMMA_BITS will return no pages.
	 */
	vcpu_run(vcpu);
	assert_exit_was_hypercall(vcpu);

	enable_dirty_tracking(vm);
	enable_migration_mode(vm);

	assert_all_slots_cmma_dirty(vm);

	/* Start from the beginning again and make sure nothing else is dirty */
	assert_no_pages_cmma_dirty(vm);

	kvm_vm_free(vm);
}

static void query_cmma_range(struct kvm_vm *vm,
			     u64 start_gfn, u64 gfn_count,
			     struct kvm_s390_cmma_log *res_out)
{
	*res_out = (struct kvm_s390_cmma_log){
		.start_gfn = start_gfn,
		.count = gfn_count,
		.flags = 0,
		.values = (__u64)&cmma_value_buf[0]
	};
	memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
	vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, res_out);
}

/**
 * Assert that the given cmma_log struct, as filled in by query_cmma_range(),
 * indicates that the first dirty gfn is first_dirty_gfn and that exactly
 * dirty_gfn_count CMMA values were returned.
 */
static void assert_cmma_dirty(u64 first_dirty_gfn,
			      u64 dirty_gfn_count,
			      const struct kvm_s390_cmma_log *res)
{
	TEST_ASSERT_EQ(res->start_gfn, first_dirty_gfn);
	TEST_ASSERT_EQ(res->count, dirty_gfn_count);
	for (size_t i = 0; i < dirty_gfn_count; i++)
		TEST_ASSERT_EQ(cmma_value_buf[i], 0x0); /* stable state */
	TEST_ASSERT_EQ(cmma_value_buf[dirty_gfn_count], 0xff); /* not touched */
}

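/*
 * This test exercises the (peek-less) GET_CMMA_BITS behavior the comments
 * below walk through: clean pages at the start of the requested range are
 * skipped, a contiguous block of values is returned starting at the first
 * dirty page, and every page reported this way has its dirty state cleared.
 */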
static void test_get_skip_holes(void)
{
	size_t gfn_offset;
	struct kvm_vm *vm = create_vm_two_memslots();
	struct kvm_s390_cmma_log log;
	struct kvm_vcpu *vcpu;
	u64 orig_psw;

	enable_cmma(vm);
	vcpu = vm_vcpu_add(vm, 1, guest_dirty_test_data);

	orig_psw = vcpu->run->psw_addr;

	/*
	 * Execute some essa instructions in the guest. Otherwise the guest will
	 * not have use_cmm enabled and GET_CMMA_BITS will return no pages.
	 */
	vcpu_run(vcpu);
	assert_exit_was_hypercall(vcpu);

	enable_dirty_tracking(vm);
	enable_migration_mode(vm);

	/* un-dirty all pages */
	assert_all_slots_cmma_dirty(vm);

	/* Then, dirty just the TEST_DATA memslot */
	vcpu->run->psw_addr = orig_psw;
	vcpu_run(vcpu);

	gfn_offset = TEST_DATA_START_GFN;
	/**
	 * Query CMMA attributes of one page, starting at page 0. Since the
	 * main memslot was not touched by the VM, this should yield the first
	 * page of the TEST_DATA memslot.
	 * The dirty bitmap should now look like this:
	 * 0: not dirty
	 * [0x1, 0x200): dirty
	 */
	query_cmma_range(vm, 0, 1, &log);
	assert_cmma_dirty(gfn_offset, 1, &log);
	gfn_offset++;

	/**
	 * Query CMMA attributes of 32 (0x20) pages past the end of the TEST_DATA
	 * memslot. This should wrap back to the beginning of the TEST_DATA
	 * memslot, page 1.
	 * The dirty bitmap should now look like this:
	 * [0, 0x21): not dirty
	 * [0x21, 0x200): dirty
	 */
	query_cmma_range(vm, TEST_DATA_START_GFN + TEST_DATA_PAGE_COUNT, 0x20, &log);
	assert_cmma_dirty(gfn_offset, 0x20, &log);
	gfn_offset += 0x20;

	/* Skip 32 pages */
	gfn_offset += 0x20;

	/**
	 * After skipping 32 pages, query the next 32 (0x20) pages.
	 * The dirty bitmap should now look like this:
	 * [0, 0x21): not dirty
	 * [0x21, 0x41): dirty
	 * [0x41, 0x61): not dirty
	 * [0x61, 0x200): dirty
	 */
	query_cmma_range(vm, gfn_offset, 0x20, &log);
	assert_cmma_dirty(gfn_offset, 0x20, &log);
	gfn_offset += 0x20;

	/**
	 * Query 1 page from the beginning of the TEST_DATA memslot. This should
	 * yield page 0x21.
	 * The dirty bitmap should now look like this:
	 * [0, 0x22): not dirty
	 * [0x22, 0x41): dirty
	 * [0x41, 0x61): not dirty
	 * [0x61, 0x200): dirty
	 */
	query_cmma_range(vm, TEST_DATA_START_GFN, 1, &log);
	assert_cmma_dirty(TEST_DATA_START_GFN + 0x21, 1, &log);
	gfn_offset++;

	/**
	 * Query 15 (0xF) pages from page 0x23 in TEST_DATA memslot.
	 * This should yield pages [0x23, 0x33).
	 * The dirty bitmap should now look like this:
	 * [0, 0x22): not dirty
	 * 0x22: dirty
	 * [0x23, 0x33): not dirty
	 * [0x33, 0x41): dirty
	 * [0x41, 0x61): not dirty
	 * [0x61, 0x200): dirty
	 */
	gfn_offset = TEST_DATA_START_GFN + 0x23;
	query_cmma_range(vm, gfn_offset, 15, &log);
	assert_cmma_dirty(gfn_offset, 15, &log);

	/**
	 * Query 17 (0x11) pages from page 0x22 in TEST_DATA memslot.
	 * This should yield page [0x22, 0x33)
	 * The dirty bitmap should now look like this:
	 * [0, 0x33): not dirty
	 * [0x33, 0x41): dirty
	 * [0x41, 0x61): not dirty
	 * [0x61, 0x200): dirty
	 */
	gfn_offset = TEST_DATA_START_GFN + 0x22;
	query_cmma_range(vm, gfn_offset, 17, &log);
	assert_cmma_dirty(gfn_offset, 17, &log);

	/**
	 * Query 25 (0x19) pages from page 0x40 in TEST_DATA memslot.
	 * This should yield page 0x40 and nothing more, since there are more
	 * than 16 non-dirty pages after page 0x40.
	 * The dirty bitmap should now look like this:
	 * [0, 0x33): not dirty
	 * [0x33, 0x40): dirty
	 * [0x40, 0x61): not dirty
	 * [0x61, 0x200): dirty
	 */
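	/*
	 * (The cut-off after 16 clean pages appears to match the kernel's
	 * KVM_S390_MAX_BIT_DISTANCE heuristic in kvm-s390.c, which ends a
	 * returned block once the run of clean values exceeds
	 * 2 * sizeof(void *) bytes; this is an assumption about the
	 * implementation, not something this test controls.)
	 */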
	gfn_offset = TEST_DATA_START_GFN + 0x40;
	query_cmma_range(vm, gfn_offset, 25, &log);
	assert_cmma_dirty(gfn_offset, 1, &log);

	/**
	 * Query pages [0x33, 0x40).
	 * The dirty bitmap should now look like this:
	 * [0, 0x61): not dirty
	 * [0x61, 0x200): dirty
	 */
	gfn_offset = TEST_DATA_START_GFN + 0x33;
	query_cmma_range(vm, gfn_offset, 0x40 - 0x33, &log);
	assert_cmma_dirty(gfn_offset, 0x40 - 0x33, &log);

	/**
	 * Query the remaining pages [0x61, 0x200).
	 */
	gfn_offset = TEST_DATA_START_GFN;
	query_cmma_range(vm, gfn_offset, TEST_DATA_PAGE_COUNT - 0x61, &log);
	assert_cmma_dirty(TEST_DATA_START_GFN + 0x61, TEST_DATA_PAGE_COUNT - 0x61, &log);

	assert_no_pages_cmma_dirty(vm);
}

struct testdef {
	const char *name;
	void (*test)(void);
} testlist[] = {
	{ "migration mode and dirty tracking", test_migration_mode },
	{ "GET_CMMA_BITS: basic calls", test_get_cmma_basic },
	{ "GET_CMMA_BITS: all pages are dirty initially", test_get_initial_dirty },
	{ "GET_CMMA_BITS: holes are skipped", test_get_skip_holes },
};

/**
 * The kernel may support CMMA, but the machine may not (i.e. if running as
 * guest-3).
 *
 * In this case, the CMMA capabilities are all there, but the CMMA-related
 * ioctls fail. To find out whether the machine supports CMMA, create a
 * temporary VM and then query the CMMA feature of the VM.
 */
static int machine_has_cmma(void)
{
	struct kvm_vm *vm = create_vm();
	int r;

	r = !__kvm_has_device_attr(vm->fd, KVM_S390_VM_MEM_CTRL, KVM_S390_VM_MEM_ENABLE_CMMA);
	kvm_vm_free(vm);

	return r;
}

int main(int argc, char *argv[])
{
	int idx;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_SYNC_REGS));
	TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_CMMA_MIGRATION));
	TEST_REQUIRE(machine_has_cmma());

	ksft_print_header();

	ksft_set_plan(ARRAY_SIZE(testlist));

	for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
		testlist[idx].test();
		ksft_test_result_pass("%s\n", testlist[idx].name);
	}

	ksft_finished();	/* Print results and exit() accordingly */
}