// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM selftest s390x library code - CPU-related functions (page tables...)
 *
 * Copyright (C) 2019, Red Hat, Inc.
 */

#include "processor.h"
#include "kvm_util.h"

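/*
 * A region or segment table on s390x has up to 2048 entries of 8 bytes
 * each, i.e. 16 KiB, which is four 4 KiB pages.
 */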
#define PAGES_PER_REGION 4

void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
	vm_paddr_t paddr;

	TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
		    vm->page_size);

	if (vm->pgd_created)
		return;

	paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
				   KVM_GUEST_PAGE_TABLE_MIN_PADDR,
				   vm->memslots[MEM_REGION_PT]);
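	/*
	 * Filling the table with 0xff sets the invalid bit in every
	 * entry, so the guest starts out with nothing mapped.
	 */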
	memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);

	vm->pgd = paddr;
	vm->pgd_created = true;
}

/*
 * Allocate 4 pages for a region/segment table (ri < 4), or one page for
 * a page table (ri == 4). Returns a suitable region/segment table entry
 * which points to the freshly allocated pages.
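 *
 * For example, for ri == 1 (a region-first table) the entry is the table
 * origin ORed with table type (4 - 1) << 2 == 0xc and table length
 * PAGES_PER_REGION - 1 == 3, i.e. a low nibble of 0xf, matching the low
 * bits that vm_arch_vcpu_add() puts into the ASCE in CR1.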
 */
static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri)
{
	uint64_t taddr;
	size_t nr_pages = ri < 4 ? PAGES_PER_REGION : 1;

	taddr = vm_phy_pages_alloc(vm, nr_pages,
				   KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
	/* Only clear what was actually allocated: a page table is one page */
	memset(addr_gpa2hva(vm, taddr), 0xff, nr_pages * vm->page_size);

	return (taddr & REGION_ENTRY_ORIGIN)
		| (((4 - ri) << 2) & REGION_ENTRY_TYPE)
		| ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH);
}

void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
{
	int ri, idx;
	uint64_t *entry;

	TEST_ASSERT((gva % vm->page_size) == 0,
		"Virtual address not on page boundary,\n"
		"  vaddr: 0x%lx vm->page_size: 0x%x",
		gva, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
		(gva >> vm->page_shift)),
		"Invalid virtual address, vaddr: 0x%lx",
		gva);
	TEST_ASSERT((gpa % vm->page_size) == 0,
		"Physical address not on page boundary,\n"
		"  paddr: 0x%lx vm->page_size: 0x%x",
		gpa, vm->page_size);
	TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
		"Physical address beyond maximum supported,\n"
		"  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		gpa, vm->max_gfn, vm->page_size);

	/* Walk through region and segment tables */
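	/*
	 * Each region/segment table level resolves 11 bits of the address
	 * (2048 entries of 8 bytes each), so level ri is indexed by the
	 * 11 bits starting at bit 64 - 11 * ri: shifts of 53, 42, 31 and
	 * 20 for ri = 1..4.
	 */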
	entry = addr_gpa2hva(vm, vm->pgd);
	for (ri = 1; ri <= 4; ri++) {
		idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
		if (entry[idx] & REGION_ENTRY_INVALID)
			entry[idx] = virt_alloc_region(vm, ri);
		entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
	}

	/* Fill in page table entry */
	idx = (gva >> 12) & 0x0ffu;		/* page index */
	if (!(entry[idx] & PAGE_INVALID))
		fprintf(stderr,
			"WARNING: PTE for gpa=0x%"PRIx64" already set!\n", gpa);
	entry[idx] = gpa;
}

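/*
 * Translate a guest virtual address to a guest physical address by
 * walking the guest's page tables. For example (illustrative), once
 * virt_arch_pg_map(vm, 0x1000, 0x5000) has been called, a lookup of
 * gva 0x1234 returns 0x5234: the mapped frame plus the page offset.
 */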
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	int ri, idx;
	uint64_t *entry;

	TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
		    vm->page_size);

	entry = addr_gpa2hva(vm, vm->pgd);
	for (ri = 1; ri <= 4; ri++) {
		idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
		TEST_ASSERT(!(entry[idx] & REGION_ENTRY_INVALID),
			    "No region mapping for vm virtual address 0x%lx",
			    gva);
		entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
	}

	idx = (gva >> 12) & 0x0ffu;		/* page index */

	TEST_ASSERT(!(entry[idx] & PAGE_INVALID),
		    "No page mapping for vm virtual address 0x%lx", gva);

	return (entry[idx] & ~0xffful) + (gva & 0xffful);
}

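/* Dump the 256 page table entries starting at guest address ptea_start. */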
static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, uint8_t indent,
			   uint64_t ptea_start)
{
	uint64_t *pte, ptea;

	for (ptea = ptea_start; ptea < ptea_start + 0x100 * 8; ptea += 8) {
		pte = addr_gpa2hva(vm, ptea);
		if (*pte & PAGE_INVALID)
			continue;
		fprintf(stream, "%*spte @ 0x%lx: 0x%016lx\n",
			indent, "", ptea, *pte);
	}
}

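/*
 * Dump all 2048 entries of the region or segment table at reg_tab_addr.
 * Region entries (non-zero type) are followed down to the next table
 * level; segment entries (type 0) point to page tables, which are
 * dumped via virt_dump_ptes().
 */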
static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent,
			     uint64_t reg_tab_addr)
{
	uint64_t addr, *entry;

	for (addr = reg_tab_addr; addr < reg_tab_addr + 0x800 * 8; addr += 8) {
		entry = addr_gpa2hva(vm, addr);
		if (*entry & REGION_ENTRY_INVALID)
			continue;
		fprintf(stream, "%*srt%lde @ 0x%lx: 0x%016lx\n",
			indent, "", 4 - ((*entry & REGION_ENTRY_TYPE) >> 2),
			addr, *entry);
		if (*entry & REGION_ENTRY_TYPE) {
			virt_dump_region(stream, vm, indent + 2,
					 *entry & REGION_ENTRY_ORIGIN);
		} else {
			virt_dump_ptes(stream, vm, indent + 2,
				       *entry & REGION_ENTRY_ORIGIN);
		}
	}
}

void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	if (!vm->pgd_created)
		return;

	virt_dump_region(stream, vm, indent, vm->pgd);
}

struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
				  void *guest_code)
{
	size_t stack_size = DEFAULT_STACK_PGS * getpagesize();
	uint64_t stack_vaddr;
	struct kvm_regs regs;
	struct kvm_sregs sregs;
	struct kvm_vcpu *vcpu;
	struct kvm_run *run;

	TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
		    vm->page_size);

	stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
				       DEFAULT_GUEST_STACK_VADDR_MIN,
				       MEM_REGION_DATA);

	vcpu = __vm_vcpu_add(vm, vcpu_id);

	/* Set up guest registers */
	vcpu_regs_get(vcpu, &regs);
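	/*
	 * The s390x ELF ABI reserves a 160-byte register save area below
	 * the stack pointer, hence the bias on the initial r15.
	 */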
	regs.gprs[15] = stack_vaddr + stack_size - 160;
	vcpu_regs_set(vcpu, &regs);

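	/*
	 * CR0 bit 45 (0x00040000) is the AFP-register control; CR1 holds
	 * the primary ASCE: the region-first table origin plus designation
	 * type 3 (region-first) and table length 3, i.e. a low nibble of
	 * 0xf.
	 */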
	vcpu_sregs_get(vcpu, &sregs);
	sregs.crs[0] |= 0x00040000;		/* Enable floating point regs */
	sregs.crs[1] = vm->pgd | 0xf;		/* Primary region table */
	vcpu_sregs_set(vcpu, &sregs);

	run = vcpu->run;
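	/*
	 * PSW mask: bit 5 (0x0400000000000000) enables DAT, bits 31-32
	 * (0x0000000180000000) select the extended (64-bit) addressing
	 * mode.
	 */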
	run->psw_mask = 0x0400000180000000ULL;  /* DAT enabled + 64 bit mode */
	run->psw_addr = (uintptr_t)guest_code;

	return vcpu;
}

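/*
 * Load up to five guest function arguments into r2-r6, the integer
 * argument registers of the s390x calling convention, so that
 * guest_code() receives them as ordinary parameters.
 */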
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
	va_list ap;
	struct kvm_regs regs;
	int i;

	TEST_ASSERT(num >= 1 && num <= 5, "Unsupported number of args,\n"
		    "  num: %u\n",
		    num);

	va_start(ap, num);
	vcpu_regs_get(vcpu, &regs);

	for (i = 0; i < num; i++)
		regs.gprs[i + 2] = va_arg(ap, uint64_t);

	vcpu_regs_set(vcpu, &regs);
	va_end(ap);
}

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
	fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n",
		indent, "", vcpu->run->psw_mask, vcpu->run->psw_addr);
}

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
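	/* Intentionally empty: nothing to check here on s390x. */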
}