// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM selftest s390x library code - CPU-related functions (page tables...)
 *
 * Copyright (C) 2019, Red Hat, Inc.
 */

#include "processor.h"
#include "kvm_util.h"

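/*
 * An s390x region or segment table occupies up to four 4 KiB pages
 * (2048 eight-byte entries at the maximum table length).
 */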
#define PAGES_PER_REGION 4

void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
	vm_paddr_t paddr;

	TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
		    vm->page_size);

	if (vm->pgd_created)
		return;

	paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
				   KVM_GUEST_PAGE_TABLE_MIN_PADDR,
				   vm->memslots[MEM_REGION_PT]);
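	/* Writing 0xff sets the invalid bit in every entry: the table starts out empty */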
	memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);

	vm->pgd = paddr;
	vm->pgd_created = true;
}

/*
 * Allocate 4 pages for a region/segment table (ri < 4), or one page for
 * a page table (ri == 4). Returns a suitable region/segment table entry
 * which points to the freshly allocated pages.
 */
static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri)
{
	uint64_t taddr;

	taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1,
				   KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
	/* Only clear as many pages as were actually allocated above */
	memset(addr_gpa2hva(vm, taddr), 0xff,
	       (ri < 4 ? PAGES_PER_REGION : 1) * vm->page_size);

	return (taddr & REGION_ENTRY_ORIGIN)
		| (((4 - ri) << 2) & REGION_ENTRY_TYPE)
		| ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH);
}

void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
{
	int ri, idx;
	uint64_t *entry;

	TEST_ASSERT((gva % vm->page_size) == 0,
		"Virtual address not on page boundary,\n"
		"  vaddr: 0x%lx vm->page_size: 0x%x",
		gva, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
		(gva >> vm->page_shift)),
		"Invalid virtual address, vaddr: 0x%lx",
		gva);
	TEST_ASSERT((gpa % vm->page_size) == 0,
		"Physical address not on page boundary,\n"
		"  paddr: 0x%lx vm->page_size: 0x%x",
		gpa, vm->page_size);
	TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
		"Physical address beyond maximum supported,\n"
		"  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		gpa, vm->max_gfn, vm->page_size);

	/*
	 * Walk through the region and segment tables: each of the four
	 * levels (region-first, region-second, region-third, segment) is
	 * indexed by 11 bits of the virtual address.
	 */
	entry = addr_gpa2hva(vm, vm->pgd);
	for (ri = 1; ri <= 4; ri++) {
		idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
		if (entry[idx] & REGION_ENTRY_INVALID)
			entry[idx] = virt_alloc_region(vm, ri);
		entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
	}

	/* Fill in page table entry */
	idx = (gva >> 12) & 0x0ffu;		/* page index */
	if (!(entry[idx] & PAGE_INVALID))
		fprintf(stderr,
			"WARNING: PTE for gpa=0x%"PRIx64" already set!\n", gpa);
	entry[idx] = gpa;
}

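/*
 * Translate a guest virtual address to a guest physical address by
 * walking the guest's DAT tables in software.
 */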
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	int ri, idx;
	uint64_t *entry;

	TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
		    vm->page_size);

	entry = addr_gpa2hva(vm, vm->pgd);
	for (ri = 1; ri <= 4; ri++) {
		idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
		TEST_ASSERT(!(entry[idx] & REGION_ENTRY_INVALID),
			    "No region mapping for vm virtual address 0x%lx",
			    gva);
		entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
	}

	idx = (gva >> 12) & 0x0ffu;		/* page index */

	TEST_ASSERT(!(entry[idx] & PAGE_INVALID),
		    "No page mapping for vm virtual address 0x%lx", gva);

	return (entry[idx] & ~0xffful) + (gva & 0xffful);
}

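/* Print all valid entries of the 256-entry page table at ptea_start */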
static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, uint8_t indent,
			   uint64_t ptea_start)
{
	uint64_t *pte, ptea;

	for (ptea = ptea_start; ptea < ptea_start + 0x100 * 8; ptea += 8) {
		pte = addr_gpa2hva(vm, ptea);
		if (*pte & PAGE_INVALID)
			continue;
		fprintf(stream, "%*spte @ 0x%lx: 0x%016lx\n",
			indent, "", ptea, *pte);
	}
}

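/*
 * Recursively print all valid entries of a region/segment table and of
 * the lower-level tables that they point to.
 */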
static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent,
			     uint64_t reg_tab_addr)
{
	uint64_t addr, *entry;

	/* Region/segment tables are 4 pages, i.e. 0x800 entries, long */
	for (addr = reg_tab_addr; addr < reg_tab_addr + 0x800 * 8; addr += 8) {
		entry = addr_gpa2hva(vm, addr);
		if (*entry & REGION_ENTRY_INVALID)
			continue;
		fprintf(stream, "%*srt%lde @ 0x%lx: 0x%016lx\n",
			indent, "", 4 - ((*entry & REGION_ENTRY_TYPE) >> 2),
			addr, *entry);
		if (*entry & REGION_ENTRY_TYPE) {
			virt_dump_region(stream, vm, indent + 2,
					 *entry & REGION_ENTRY_ORIGIN);
		} else {
			virt_dump_ptes(stream, vm, indent + 2,
				       *entry & REGION_ENTRY_ORIGIN);
		}
	}
}

void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	if (!vm->pgd_created)
		return;

	virt_dump_region(stream, vm, indent, vm->pgd);
}

struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
				  void *guest_code)
{
	size_t stack_size = DEFAULT_STACK_PGS * getpagesize();
	uint64_t stack_vaddr;
	struct kvm_regs regs;
	struct kvm_sregs sregs;
	struct kvm_vcpu *vcpu;
	struct kvm_run *run;

	TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
		    vm->page_size);

	stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
				       DEFAULT_GUEST_STACK_VADDR_MIN,
				       MEM_REGION_DATA);

	vcpu = __vm_vcpu_add(vm, vcpu_id);

	/* Setup guest registers */
	vcpu_regs_get(vcpu, &regs);
	/* r15 is the stack pointer; the s390x ELF ABI keeps a 160-byte register save area below it */
	regs.gprs[15] = stack_vaddr + stack_size - 160;
	vcpu_regs_set(vcpu, &regs);

	vcpu_sregs_get(vcpu, &sregs);
	sregs.crs[0] |= 0x00040000;		/* Enable floating point regs */
	sregs.crs[1] = vm->pgd | 0xf;		/* Primary ASCE: region-first table, max length */
	vcpu_sregs_set(vcpu, &sregs);

	run = vcpu->run;
	run->psw_mask = 0x0400000180000000ULL;  /* DAT enabled + 64 bit mode */
	run->psw_addr = (uintptr_t)guest_code;

	return vcpu;
}

void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
	va_list ap;
	struct kvm_regs regs;
	int i;

	TEST_ASSERT(num >= 1 && num <= 5, "Unsupported number of args,\n"
		    "  num: %u\n",
		    num);

	va_start(ap, num);
	vcpu_regs_get(vcpu, &regs);

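	/* The s390x ELF ABI passes the first five integer arguments in r2-r6 */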
	for (i = 0; i < num; i++)
		regs.gprs[i + 2] = va_arg(ap, uint64_t);

	vcpu_regs_set(vcpu, &regs);
	va_end(ap);
}

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
	fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n",
		indent, "", vcpu->run->psw_mask, vcpu->run->psw_addr);
}

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
	/*
	 * Intentionally empty: s390x selftests do not install guest
	 * exception handlers that report via ucall, so there is nothing
	 * to check here.
	 */
}