xref: /openbmc/linux/arch/s390/kvm/kvm-s390.c (revision b0c632db637d68ad39d9f97f452ce176253f5f4e)
1*b0c632dbSHeiko Carstens /*
2*b0c632dbSHeiko Carstens  * kvm-s390.c -- hosting zSeries kernel virtual machines
3*b0c632dbSHeiko Carstens  *
4*b0c632dbSHeiko Carstens  * Copyright IBM Corp. 2008
5*b0c632dbSHeiko Carstens  *
6*b0c632dbSHeiko Carstens  * This program is free software; you can redistribute it and/or modify
7*b0c632dbSHeiko Carstens  * it under the terms of the GNU General Public License (version 2 only)
8*b0c632dbSHeiko Carstens  * as published by the Free Software Foundation.
9*b0c632dbSHeiko Carstens  *
10*b0c632dbSHeiko Carstens  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11*b0c632dbSHeiko Carstens  *               Christian Borntraeger <borntraeger@de.ibm.com>
12*b0c632dbSHeiko Carstens  *               Heiko Carstens <heiko.carstens@de.ibm.com>
13*b0c632dbSHeiko Carstens  */
14*b0c632dbSHeiko Carstens 
15*b0c632dbSHeiko Carstens #include <linux/compiler.h>
16*b0c632dbSHeiko Carstens #include <linux/err.h>
17*b0c632dbSHeiko Carstens #include <linux/fs.h>
18*b0c632dbSHeiko Carstens #include <linux/init.h>
19*b0c632dbSHeiko Carstens #include <linux/kvm.h>
20*b0c632dbSHeiko Carstens #include <linux/kvm_host.h>
21*b0c632dbSHeiko Carstens #include <linux/module.h>
22*b0c632dbSHeiko Carstens #include <linux/slab.h>
23*b0c632dbSHeiko Carstens #include <asm/lowcore.h>
24*b0c632dbSHeiko Carstens #include <asm/pgtable.h>
25*b0c632dbSHeiko Carstens 
26*b0c632dbSHeiko Carstens #include "gaccess.h"
27*b0c632dbSHeiko Carstens 
28*b0c632dbSHeiko Carstens #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
29*b0c632dbSHeiko Carstens 
30*b0c632dbSHeiko Carstens struct kvm_stats_debugfs_item debugfs_entries[] = {
31*b0c632dbSHeiko Carstens 	{ "userspace_handled", VCPU_STAT(exit_userspace) },
32*b0c632dbSHeiko Carstens 	{ NULL }
33*b0c632dbSHeiko Carstens };
34*b0c632dbSHeiko Carstens 
35*b0c632dbSHeiko Carstens 
36*b0c632dbSHeiko Carstens /* Section: not file related */
37*b0c632dbSHeiko Carstens void kvm_arch_hardware_enable(void *garbage)
38*b0c632dbSHeiko Carstens {
39*b0c632dbSHeiko Carstens 	/* every s390 is virtualization enabled ;-) */
40*b0c632dbSHeiko Carstens }
41*b0c632dbSHeiko Carstens 
42*b0c632dbSHeiko Carstens void kvm_arch_hardware_disable(void *garbage)
43*b0c632dbSHeiko Carstens {
44*b0c632dbSHeiko Carstens }
45*b0c632dbSHeiko Carstens 
46*b0c632dbSHeiko Carstens void decache_vcpus_on_cpu(int cpu)
47*b0c632dbSHeiko Carstens {
48*b0c632dbSHeiko Carstens }
49*b0c632dbSHeiko Carstens 
50*b0c632dbSHeiko Carstens int kvm_arch_hardware_setup(void)
51*b0c632dbSHeiko Carstens {
52*b0c632dbSHeiko Carstens 	return 0;
53*b0c632dbSHeiko Carstens }
54*b0c632dbSHeiko Carstens 
55*b0c632dbSHeiko Carstens void kvm_arch_hardware_unsetup(void)
56*b0c632dbSHeiko Carstens {
57*b0c632dbSHeiko Carstens }
58*b0c632dbSHeiko Carstens 
59*b0c632dbSHeiko Carstens void kvm_arch_check_processor_compat(void *rtn)
60*b0c632dbSHeiko Carstens {
61*b0c632dbSHeiko Carstens }
62*b0c632dbSHeiko Carstens 
63*b0c632dbSHeiko Carstens int kvm_arch_init(void *opaque)
64*b0c632dbSHeiko Carstens {
65*b0c632dbSHeiko Carstens 	return 0;
66*b0c632dbSHeiko Carstens }
67*b0c632dbSHeiko Carstens 
68*b0c632dbSHeiko Carstens void kvm_arch_exit(void)
69*b0c632dbSHeiko Carstens {
70*b0c632dbSHeiko Carstens }
71*b0c632dbSHeiko Carstens 
72*b0c632dbSHeiko Carstens /* Section: device related */
73*b0c632dbSHeiko Carstens long kvm_arch_dev_ioctl(struct file *filp,
74*b0c632dbSHeiko Carstens 			unsigned int ioctl, unsigned long arg)
75*b0c632dbSHeiko Carstens {
76*b0c632dbSHeiko Carstens 	if (ioctl == KVM_S390_ENABLE_SIE)
77*b0c632dbSHeiko Carstens 		return s390_enable_sie();
78*b0c632dbSHeiko Carstens 	return -EINVAL;
79*b0c632dbSHeiko Carstens }
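
/*
 * Illustrative userspace sketch (not part of this file): KVM_S390_ENABLE_SIE
 * is issued on the /dev/kvm device fd and ends up in kvm_arch_dev_ioctl()
 * above.  The open() call and the error handling are assumptions made for
 * illustration only.
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *
 *	if (kvm_fd < 0 || ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0) < 0)
 *		return -1;
 */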
80*b0c632dbSHeiko Carstens 
81*b0c632dbSHeiko Carstens int kvm_dev_ioctl_check_extension(long ext)
82*b0c632dbSHeiko Carstens {
83*b0c632dbSHeiko Carstens 	return 0;
84*b0c632dbSHeiko Carstens }
85*b0c632dbSHeiko Carstens 
86*b0c632dbSHeiko Carstens /* Section: vm related */
87*b0c632dbSHeiko Carstens /*
88*b0c632dbSHeiko Carstens  * Get (and clear) the dirty memory log for a memory slot
89*b0c632dbSHeiko Carstens  * (not implemented on s390 yet; this stub just reports success).
89*b0c632dbSHeiko Carstens  */
90*b0c632dbSHeiko Carstens int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
91*b0c632dbSHeiko Carstens 			       struct kvm_dirty_log *log)
92*b0c632dbSHeiko Carstens {
93*b0c632dbSHeiko Carstens 	return 0;
94*b0c632dbSHeiko Carstens }
95*b0c632dbSHeiko Carstens 
96*b0c632dbSHeiko Carstens long kvm_arch_vm_ioctl(struct file *filp,
97*b0c632dbSHeiko Carstens 		       unsigned int ioctl, unsigned long arg)
98*b0c632dbSHeiko Carstens {
99*b0c632dbSHeiko Carstens 	struct kvm *kvm = filp->private_data;
100*b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
101*b0c632dbSHeiko Carstens 	int r;
102*b0c632dbSHeiko Carstens 
103*b0c632dbSHeiko Carstens 	switch (ioctl) {
104*b0c632dbSHeiko Carstens 	default:
105*b0c632dbSHeiko Carstens 		r = -EINVAL;
106*b0c632dbSHeiko Carstens 	}
107*b0c632dbSHeiko Carstens 
108*b0c632dbSHeiko Carstens 	return r;
109*b0c632dbSHeiko Carstens }
110*b0c632dbSHeiko Carstens 
111*b0c632dbSHeiko Carstens struct kvm *kvm_arch_create_vm(void)
112*b0c632dbSHeiko Carstens {
113*b0c632dbSHeiko Carstens 	struct kvm *kvm;
114*b0c632dbSHeiko Carstens 	int rc;
115*b0c632dbSHeiko Carstens 	char debug_name[16];
116*b0c632dbSHeiko Carstens 
117*b0c632dbSHeiko Carstens 	rc = s390_enable_sie();
118*b0c632dbSHeiko Carstens 	if (rc)
119*b0c632dbSHeiko Carstens 		goto out_nokvm;
120*b0c632dbSHeiko Carstens 
121*b0c632dbSHeiko Carstens 	rc = -ENOMEM;
122*b0c632dbSHeiko Carstens 	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
123*b0c632dbSHeiko Carstens 	if (!kvm)
124*b0c632dbSHeiko Carstens 		goto out_nokvm;
125*b0c632dbSHeiko Carstens 
126*b0c632dbSHeiko Carstens 	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
127*b0c632dbSHeiko Carstens 	if (!kvm->arch.sca)
128*b0c632dbSHeiko Carstens 		goto out_nosca;
129*b0c632dbSHeiko Carstens 
130*b0c632dbSHeiko Carstens 	sprintf(debug_name, "kvm-%u", current->pid);
131*b0c632dbSHeiko Carstens 
132*b0c632dbSHeiko Carstens 	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
133*b0c632dbSHeiko Carstens 	if (!kvm->arch.dbf)
134*b0c632dbSHeiko Carstens 		goto out_nodbf;
135*b0c632dbSHeiko Carstens 
136*b0c632dbSHeiko Carstens 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
137*b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "%s", "vm created");
138*b0c632dbSHeiko Carstens 
139*b0c632dbSHeiko Carstens 	try_module_get(THIS_MODULE);
140*b0c632dbSHeiko Carstens 
141*b0c632dbSHeiko Carstens 	return kvm;
142*b0c632dbSHeiko Carstens out_nodbf:
143*b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
144*b0c632dbSHeiko Carstens out_nosca:
145*b0c632dbSHeiko Carstens 	kfree(kvm);
146*b0c632dbSHeiko Carstens out_nokvm:
147*b0c632dbSHeiko Carstens 	return ERR_PTR(rc);
148*b0c632dbSHeiko Carstens }
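
/*
 * Illustrative userspace sketch (not part of this file): a VM file descriptor
 * obtained with the generic KVM_CREATE_VM ioctl is what causes the common
 * code to call kvm_arch_create_vm() above.  kvm_fd is assumed to be an open
 * /dev/kvm fd with SIE already enabled, as in the earlier sketch.
 *
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *
 *	if (vm_fd < 0)
 *		return -1;
 */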
149*b0c632dbSHeiko Carstens 
150*b0c632dbSHeiko Carstens void kvm_arch_destroy_vm(struct kvm *kvm)
151*b0c632dbSHeiko Carstens {
152*b0c632dbSHeiko Carstens 	debug_unregister(kvm->arch.dbf);
153*b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
154*b0c632dbSHeiko Carstens 	kfree(kvm);
155*b0c632dbSHeiko Carstens 	module_put(THIS_MODULE);
156*b0c632dbSHeiko Carstens }
157*b0c632dbSHeiko Carstens 
158*b0c632dbSHeiko Carstens /* Section: vcpu related */
159*b0c632dbSHeiko Carstens int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
160*b0c632dbSHeiko Carstens {
161*b0c632dbSHeiko Carstens 	return 0;
162*b0c632dbSHeiko Carstens }
163*b0c632dbSHeiko Carstens 
164*b0c632dbSHeiko Carstens void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
165*b0c632dbSHeiko Carstens {
166*b0c632dbSHeiko Carstens 	/* kvm common code refers to this, but doesn't call it */
167*b0c632dbSHeiko Carstens 	BUG();
168*b0c632dbSHeiko Carstens }
169*b0c632dbSHeiko Carstens 
170*b0c632dbSHeiko Carstens void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
171*b0c632dbSHeiko Carstens {
172*b0c632dbSHeiko Carstens 	save_fp_regs(&vcpu->arch.host_fpregs);
173*b0c632dbSHeiko Carstens 	save_access_regs(vcpu->arch.host_acrs);
174*b0c632dbSHeiko Carstens 	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
175*b0c632dbSHeiko Carstens 	restore_fp_regs(&vcpu->arch.guest_fpregs);
176*b0c632dbSHeiko Carstens 	restore_access_regs(vcpu->arch.guest_acrs);
177*b0c632dbSHeiko Carstens 
178*b0c632dbSHeiko Carstens 	if (signal_pending(current))
179*b0c632dbSHeiko Carstens 		atomic_set_mask(CPUSTAT_STOP_INT,
180*b0c632dbSHeiko Carstens 			&vcpu->arch.sie_block->cpuflags);
181*b0c632dbSHeiko Carstens }
182*b0c632dbSHeiko Carstens 
183*b0c632dbSHeiko Carstens void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
184*b0c632dbSHeiko Carstens {
185*b0c632dbSHeiko Carstens 	save_fp_regs(&vcpu->arch.guest_fpregs);
186*b0c632dbSHeiko Carstens 	save_access_regs(vcpu->arch.guest_acrs);
187*b0c632dbSHeiko Carstens 	restore_fp_regs(&vcpu->arch.host_fpregs);
188*b0c632dbSHeiko Carstens 	restore_access_regs(vcpu->arch.host_acrs);
189*b0c632dbSHeiko Carstens }
190*b0c632dbSHeiko Carstens 
191*b0c632dbSHeiko Carstens static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
192*b0c632dbSHeiko Carstens {
193*b0c632dbSHeiko Carstens 	/* this equals the initial cpu reset described in the POP, but we don't switch to ESA */
194*b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.mask = 0UL;
195*b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.addr = 0UL;
196*b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->prefix    = 0UL;
197*b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->ihcpu     = 0xffff;
198*b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->cputm     = 0UL;
199*b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->ckc       = 0UL;
200*b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->todpr     = 0;
201*b0c632dbSHeiko Carstens 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
202*b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
203*b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
204*b0c632dbSHeiko Carstens 	vcpu->arch.guest_fpregs.fpc = 0;
205*b0c632dbSHeiko Carstens 	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
206*b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gbea = 1;
207*b0c632dbSHeiko Carstens }
208*b0c632dbSHeiko Carstens 
209*b0c632dbSHeiko Carstens int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
210*b0c632dbSHeiko Carstens {
211*b0c632dbSHeiko Carstens 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
212*b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gmslm = 0xffffffffffUL;
213*b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gmsor = 0x000000000000;
214*b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->ecb   = 2;
215*b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->eca   = 0xC1002001U;
216*b0c632dbSHeiko Carstens 
217*b0c632dbSHeiko Carstens 	return 0;
218*b0c632dbSHeiko Carstens }
219*b0c632dbSHeiko Carstens 
220*b0c632dbSHeiko Carstens struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
221*b0c632dbSHeiko Carstens 				      unsigned int id)
222*b0c632dbSHeiko Carstens {
223*b0c632dbSHeiko Carstens 	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
224*b0c632dbSHeiko Carstens 	int rc = -ENOMEM;
225*b0c632dbSHeiko Carstens 
226*b0c632dbSHeiko Carstens 	if (!vcpu)
227*b0c632dbSHeiko Carstens 		goto out_nomem;
228*b0c632dbSHeiko Carstens 
229*b0c632dbSHeiko Carstens 	vcpu->arch.sie_block = (struct sie_block *) get_zeroed_page(GFP_KERNEL);
230*b0c632dbSHeiko Carstens 
231*b0c632dbSHeiko Carstens 	if (!vcpu->arch.sie_block)
232*b0c632dbSHeiko Carstens 		goto out_free_cpu;
233*b0c632dbSHeiko Carstens 
234*b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icpua = id;
235*b0c632dbSHeiko Carstens 	BUG_ON(!kvm->arch.sca);
236*b0c632dbSHeiko Carstens 	BUG_ON(kvm->arch.sca->cpu[id].sda);
237*b0c632dbSHeiko Carstens 	kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
238*b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
239*b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
240*b0c632dbSHeiko Carstens 
241*b0c632dbSHeiko Carstens 	rc = kvm_vcpu_init(vcpu, kvm, id);
242*b0c632dbSHeiko Carstens 	if (rc)
243*b0c632dbSHeiko Carstens 		goto out_free_cpu;
244*b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
245*b0c632dbSHeiko Carstens 		 vcpu->arch.sie_block);
246*b0c632dbSHeiko Carstens 
247*b0c632dbSHeiko Carstens 	try_module_get(THIS_MODULE);
248*b0c632dbSHeiko Carstens 
249*b0c632dbSHeiko Carstens 	return vcpu;
250*b0c632dbSHeiko Carstens out_free_cpu:
251*b0c632dbSHeiko Carstens 	kfree(vcpu);
252*b0c632dbSHeiko Carstens out_nomem:
253*b0c632dbSHeiko Carstens 	return ERR_PTR(rc);
254*b0c632dbSHeiko Carstens }
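
/*
 * Illustrative userspace sketch (not part of this file): vcpus are created
 * with the generic KVM_CREATE_VCPU ioctl on the VM fd; the value passed as
 * the ioctl argument is the "id" parameter of kvm_arch_vcpu_create() above
 * and selects the SCA entry.  vm_fd is assumed from the earlier sketch.
 *
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *
 *	if (vcpu_fd < 0)
 *		return -1;
 */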
255*b0c632dbSHeiko Carstens 
256*b0c632dbSHeiko Carstens void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
257*b0c632dbSHeiko Carstens {
258*b0c632dbSHeiko Carstens 	VCPU_EVENT(vcpu, 3, "%s", "destroy cpu");
259*b0c632dbSHeiko Carstens 	free_page((unsigned long)(vcpu->arch.sie_block));
260*b0c632dbSHeiko Carstens 	kfree(vcpu);
261*b0c632dbSHeiko Carstens 	module_put(THIS_MODULE);
262*b0c632dbSHeiko Carstens }
263*b0c632dbSHeiko Carstens 
264*b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
265*b0c632dbSHeiko Carstens {
266*b0c632dbSHeiko Carstens 	/* kvm common code refers to this, but never calls it */
267*b0c632dbSHeiko Carstens 	BUG();
268*b0c632dbSHeiko Carstens 	return 0;
269*b0c632dbSHeiko Carstens }
270*b0c632dbSHeiko Carstens 
271*b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
272*b0c632dbSHeiko Carstens {
273*b0c632dbSHeiko Carstens 	vcpu_load(vcpu);
274*b0c632dbSHeiko Carstens 	kvm_s390_vcpu_initial_reset(vcpu);
275*b0c632dbSHeiko Carstens 	vcpu_put(vcpu);
276*b0c632dbSHeiko Carstens 	return 0;
277*b0c632dbSHeiko Carstens }
278*b0c632dbSHeiko Carstens 
279*b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
280*b0c632dbSHeiko Carstens {
281*b0c632dbSHeiko Carstens 	vcpu_load(vcpu);
282*b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
283*b0c632dbSHeiko Carstens 	vcpu_put(vcpu);
284*b0c632dbSHeiko Carstens 	return 0;
285*b0c632dbSHeiko Carstens }
286*b0c632dbSHeiko Carstens 
287*b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
288*b0c632dbSHeiko Carstens {
289*b0c632dbSHeiko Carstens 	vcpu_load(vcpu);
290*b0c632dbSHeiko Carstens 	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
291*b0c632dbSHeiko Carstens 	vcpu_put(vcpu);
292*b0c632dbSHeiko Carstens 	return 0;
293*b0c632dbSHeiko Carstens }
294*b0c632dbSHeiko Carstens 
295*b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
296*b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
297*b0c632dbSHeiko Carstens {
298*b0c632dbSHeiko Carstens 	vcpu_load(vcpu);
299*b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
300*b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
301*b0c632dbSHeiko Carstens 	vcpu_put(vcpu);
302*b0c632dbSHeiko Carstens 	return 0;
303*b0c632dbSHeiko Carstens }
304*b0c632dbSHeiko Carstens 
305*b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
306*b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
307*b0c632dbSHeiko Carstens {
308*b0c632dbSHeiko Carstens 	vcpu_load(vcpu);
309*b0c632dbSHeiko Carstens 	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
310*b0c632dbSHeiko Carstens 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
311*b0c632dbSHeiko Carstens 	vcpu_put(vcpu);
312*b0c632dbSHeiko Carstens 	return 0;
313*b0c632dbSHeiko Carstens }
314*b0c632dbSHeiko Carstens 
315*b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
316*b0c632dbSHeiko Carstens {
317*b0c632dbSHeiko Carstens 	vcpu_load(vcpu);
318*b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
319*b0c632dbSHeiko Carstens 	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
320*b0c632dbSHeiko Carstens 	vcpu_put(vcpu);
321*b0c632dbSHeiko Carstens 	return 0;
322*b0c632dbSHeiko Carstens }
323*b0c632dbSHeiko Carstens 
324*b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
325*b0c632dbSHeiko Carstens {
326*b0c632dbSHeiko Carstens 	vcpu_load(vcpu);
327*b0c632dbSHeiko Carstens 	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
328*b0c632dbSHeiko Carstens 	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
329*b0c632dbSHeiko Carstens 	vcpu_put(vcpu);
330*b0c632dbSHeiko Carstens 	return 0;
331*b0c632dbSHeiko Carstens }
332*b0c632dbSHeiko Carstens 
333*b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
334*b0c632dbSHeiko Carstens {
335*b0c632dbSHeiko Carstens 	int rc = 0;
336*b0c632dbSHeiko Carstens 
337*b0c632dbSHeiko Carstens 	vcpu_load(vcpu);
338*b0c632dbSHeiko Carstens 	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
339*b0c632dbSHeiko Carstens 		rc = -EBUSY;
340*b0c632dbSHeiko Carstens 	else
341*b0c632dbSHeiko Carstens 		vcpu->arch.sie_block->gpsw = psw;
342*b0c632dbSHeiko Carstens 	vcpu_put(vcpu);
343*b0c632dbSHeiko Carstens 	return rc;
344*b0c632dbSHeiko Carstens }
345*b0c632dbSHeiko Carstens 
346*b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
347*b0c632dbSHeiko Carstens 				  struct kvm_translation *tr)
348*b0c632dbSHeiko Carstens {
349*b0c632dbSHeiko Carstens 	return -EINVAL; /* not implemented yet */
350*b0c632dbSHeiko Carstens }
351*b0c632dbSHeiko Carstens 
352*b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
353*b0c632dbSHeiko Carstens 				    struct kvm_debug_guest *dbg)
354*b0c632dbSHeiko Carstens {
355*b0c632dbSHeiko Carstens 	return -EINVAL; /* not implemented yet */
356*b0c632dbSHeiko Carstens }
357*b0c632dbSHeiko Carstens 
358*b0c632dbSHeiko Carstens static void __vcpu_run(struct kvm_vcpu *vcpu)
359*b0c632dbSHeiko Carstens {
360*b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
361*b0c632dbSHeiko Carstens 
362*b0c632dbSHeiko Carstens 	if (need_resched())
363*b0c632dbSHeiko Carstens 		schedule();
364*b0c632dbSHeiko Carstens 
365*b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icptcode = 0;
366*b0c632dbSHeiko Carstens 	local_irq_disable();
367*b0c632dbSHeiko Carstens 	kvm_guest_enter();
368*b0c632dbSHeiko Carstens 	local_irq_enable();
369*b0c632dbSHeiko Carstens 	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
370*b0c632dbSHeiko Carstens 		   atomic_read(&vcpu->arch.sie_block->cpuflags));
371*b0c632dbSHeiko Carstens 	sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
372*b0c632dbSHeiko Carstens 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
373*b0c632dbSHeiko Carstens 		   vcpu->arch.sie_block->icptcode);
374*b0c632dbSHeiko Carstens 	local_irq_disable();
375*b0c632dbSHeiko Carstens 	kvm_guest_exit();
376*b0c632dbSHeiko Carstens 	local_irq_enable();
377*b0c632dbSHeiko Carstens 
378*b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
379*b0c632dbSHeiko Carstens }
380*b0c632dbSHeiko Carstens 
381*b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
382*b0c632dbSHeiko Carstens {
383*b0c632dbSHeiko Carstens 	sigset_t sigsaved;
384*b0c632dbSHeiko Carstens 
385*b0c632dbSHeiko Carstens 	vcpu_load(vcpu);
386*b0c632dbSHeiko Carstens 
387*b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
388*b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
389*b0c632dbSHeiko Carstens 
390*b0c632dbSHeiko Carstens 	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
391*b0c632dbSHeiko Carstens 
392*b0c632dbSHeiko Carstens 	__vcpu_run(vcpu);
393*b0c632dbSHeiko Carstens 
394*b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
395*b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
396*b0c632dbSHeiko Carstens 
397*b0c632dbSHeiko Carstens 	vcpu_put(vcpu);
398*b0c632dbSHeiko Carstens 
399*b0c632dbSHeiko Carstens 	vcpu->stat.exit_userspace++;
400*b0c632dbSHeiko Carstens 	return 0;
401*b0c632dbSHeiko Carstens }
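
/*
 * Illustrative userspace sketch (not part of this file): the run loop maps
 * the shared kvm_run area of the vcpu fd and then enters the guest through
 * the generic KVM_RUN ioctl, which lands in kvm_arch_vcpu_ioctl_run() above.
 * Reporting only the raw exit_reason is a placeholder assumption.
 *
 *	int size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *
 *	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
 *		return -1;
 *	printf("exit reason %u\n", run->exit_reason);
 */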
402*b0c632dbSHeiko Carstens 
403*b0c632dbSHeiko Carstens static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
404*b0c632dbSHeiko Carstens 		       unsigned long n, int prefix)
405*b0c632dbSHeiko Carstens {
406*b0c632dbSHeiko Carstens 	if (prefix)
407*b0c632dbSHeiko Carstens 		return copy_to_guest(vcpu, guestdest, from, n);
408*b0c632dbSHeiko Carstens 	else
409*b0c632dbSHeiko Carstens 		return copy_to_guest_absolute(vcpu, guestdest, from, n);
410*b0c632dbSHeiko Carstens }
411*b0c632dbSHeiko Carstens 
412*b0c632dbSHeiko Carstens /*
413*b0c632dbSHeiko Carstens  * store status at address
414*b0c632dbSHeiko Carstens  * we have two special cases:
415*b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
416*b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
417*b0c632dbSHeiko Carstens  */
418*b0c632dbSHeiko Carstens int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
419*b0c632dbSHeiko Carstens {
420*b0c632dbSHeiko Carstens 	const unsigned char archmode = 1;
421*b0c632dbSHeiko Carstens 	int prefix;
422*b0c632dbSHeiko Carstens 
423*b0c632dbSHeiko Carstens 	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
424*b0c632dbSHeiko Carstens 		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
425*b0c632dbSHeiko Carstens 			return -EFAULT;
426*b0c632dbSHeiko Carstens 		addr = SAVE_AREA_BASE;
427*b0c632dbSHeiko Carstens 		prefix = 0;
428*b0c632dbSHeiko Carstens 	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
429*b0c632dbSHeiko Carstens 		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
430*b0c632dbSHeiko Carstens 			return -EFAULT;
431*b0c632dbSHeiko Carstens 		addr = SAVE_AREA_BASE;
432*b0c632dbSHeiko Carstens 		prefix = 1;
433*b0c632dbSHeiko Carstens 	} else
434*b0c632dbSHeiko Carstens 		prefix = 0;
435*b0c632dbSHeiko Carstens 
436*b0c632dbSHeiko Carstens 	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
437*b0c632dbSHeiko Carstens 			vcpu->arch.guest_fpregs.fprs, 128, prefix))
438*b0c632dbSHeiko Carstens 		return -EFAULT;
439*b0c632dbSHeiko Carstens 
440*b0c632dbSHeiko Carstens 	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
441*b0c632dbSHeiko Carstens 			vcpu->arch.guest_gprs, 128, prefix))
442*b0c632dbSHeiko Carstens 		return -EFAULT;
443*b0c632dbSHeiko Carstens 
444*b0c632dbSHeiko Carstens 	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
445*b0c632dbSHeiko Carstens 			&vcpu->arch.sie_block->gpsw, 16, prefix))
446*b0c632dbSHeiko Carstens 		return -EFAULT;
447*b0c632dbSHeiko Carstens 
448*b0c632dbSHeiko Carstens 	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
449*b0c632dbSHeiko Carstens 			&vcpu->arch.sie_block->prefix, 4, prefix))
450*b0c632dbSHeiko Carstens 		return -EFAULT;
451*b0c632dbSHeiko Carstens 
452*b0c632dbSHeiko Carstens 	if (__guestcopy(vcpu,
453*b0c632dbSHeiko Carstens 			addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
454*b0c632dbSHeiko Carstens 			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
455*b0c632dbSHeiko Carstens 		return -EFAULT;
456*b0c632dbSHeiko Carstens 
457*b0c632dbSHeiko Carstens 	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
458*b0c632dbSHeiko Carstens 			&vcpu->arch.sie_block->todpr, 4, prefix))
459*b0c632dbSHeiko Carstens 		return -EFAULT;
460*b0c632dbSHeiko Carstens 
461*b0c632dbSHeiko Carstens 	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
462*b0c632dbSHeiko Carstens 			&vcpu->arch.sie_block->cputm, 8, prefix))
463*b0c632dbSHeiko Carstens 		return -EFAULT;
464*b0c632dbSHeiko Carstens 
465*b0c632dbSHeiko Carstens 	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
466*b0c632dbSHeiko Carstens 			&vcpu->arch.sie_block->ckc, 8, prefix))
467*b0c632dbSHeiko Carstens 		return -EFAULT;
468*b0c632dbSHeiko Carstens 
469*b0c632dbSHeiko Carstens 	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
470*b0c632dbSHeiko Carstens 			&vcpu->arch.guest_acrs, 64, prefix))
471*b0c632dbSHeiko Carstens 		return -EFAULT;
472*b0c632dbSHeiko Carstens 
473*b0c632dbSHeiko Carstens 	if (__guestcopy(vcpu,
474*b0c632dbSHeiko Carstens 			addr + offsetof(struct save_area_s390x, ctrl_regs),
475*b0c632dbSHeiko Carstens 			&vcpu->arch.sie_block->gcr, 128, prefix))
476*b0c632dbSHeiko Carstens 		return -EFAULT;
477*b0c632dbSHeiko Carstens 	return 0;
478*b0c632dbSHeiko Carstens }
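
/*
 * Illustrative userspace sketch (not part of this file): the destination is
 * passed as the raw ioctl argument rather than through a pointer, so the two
 * special values documented above can be handed to the ioctl directly.
 *
 *	if (ioctl(vcpu_fd, KVM_S390_STORE_STATUS,
 *		  KVM_S390_STORE_STATUS_NOADDR) < 0)
 *		return -1;
 */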
479*b0c632dbSHeiko Carstens 
480*b0c632dbSHeiko Carstens static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
481*b0c632dbSHeiko Carstens {
482*b0c632dbSHeiko Carstens 	int rc;
483*b0c632dbSHeiko Carstens 
484*b0c632dbSHeiko Carstens 	vcpu_load(vcpu);
485*b0c632dbSHeiko Carstens 	rc = __kvm_s390_vcpu_store_status(vcpu, addr);
486*b0c632dbSHeiko Carstens 	vcpu_put(vcpu);
487*b0c632dbSHeiko Carstens 	return rc;
488*b0c632dbSHeiko Carstens }
489*b0c632dbSHeiko Carstens 
490*b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp,
491*b0c632dbSHeiko Carstens 			 unsigned int ioctl, unsigned long arg)
492*b0c632dbSHeiko Carstens {
493*b0c632dbSHeiko Carstens 	struct kvm_vcpu *vcpu = filp->private_data;
494*b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
495*b0c632dbSHeiko Carstens 
496*b0c632dbSHeiko Carstens 	switch (ioctl) {
497*b0c632dbSHeiko Carstens 	case KVM_S390_STORE_STATUS:
498*b0c632dbSHeiko Carstens 		return kvm_s390_vcpu_store_status(vcpu, arg);
499*b0c632dbSHeiko Carstens 	case KVM_S390_SET_INITIAL_PSW: {
500*b0c632dbSHeiko Carstens 		psw_t psw;
501*b0c632dbSHeiko Carstens 
502*b0c632dbSHeiko Carstens 		if (copy_from_user(&psw, argp, sizeof(psw)))
503*b0c632dbSHeiko Carstens 			return -EFAULT;
504*b0c632dbSHeiko Carstens 		return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
505*b0c632dbSHeiko Carstens 	}
506*b0c632dbSHeiko Carstens 	case KVM_S390_INITIAL_RESET:
507*b0c632dbSHeiko Carstens 		return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
508*b0c632dbSHeiko Carstens 	default:
509*b0c632dbSHeiko Carstens 		;
510*b0c632dbSHeiko Carstens 	}
511*b0c632dbSHeiko Carstens 	return -EINVAL;
512*b0c632dbSHeiko Carstens }
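
/*
 * Illustrative userspace sketch (not part of this file): KVM_S390_INITIAL_RESET
 * takes no argument, while KVM_S390_SET_INITIAL_PSW copies a psw, i.e. a
 * 64-bit mask followed by a 64-bit address, from the pointer it is given.
 * The struct layout, mask and start address below are assumptions made for
 * illustration.
 *
 *	struct { __u64 mask; __u64 addr; } psw = {
 *		0x0000000180000000UL, 0x10000UL
 *	};
 *
 *	ioctl(vcpu_fd, KVM_S390_INITIAL_RESET, 0);
 *	if (ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw) < 0)
 *		return -1;
 */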
513*b0c632dbSHeiko Carstens 
514*b0c632dbSHeiko Carstens /* Section: memory related */
515*b0c632dbSHeiko Carstens int kvm_arch_set_memory_region(struct kvm *kvm,
516*b0c632dbSHeiko Carstens 				struct kvm_userspace_memory_region *mem,
517*b0c632dbSHeiko Carstens 				struct kvm_memory_slot old,
518*b0c632dbSHeiko Carstens 				int user_alloc)
519*b0c632dbSHeiko Carstens {
520*b0c632dbSHeiko Carstens 	/* A few sanity checks. We can have exactly one memory slot, which has
521*b0c632dbSHeiko Carstens 	   to start at guest physical zero, has to be located at a page
522*b0c632dbSHeiko Carstens 	   boundary in userland and has to end at a page boundary. The memory
523*b0c632dbSHeiko Carstens 	   in userland may be fragmented into various different vmas. It is
524*b0c632dbSHeiko Carstens 	   okay to mmap() and munmap() memory in this slot at any time after
525*b0c632dbSHeiko Carstens 	   doing this call (see the illustrative sketch after this function). */
526*b0c632dbSHeiko Carstens 
527*b0c632dbSHeiko Carstens 	if (mem->slot)
528*b0c632dbSHeiko Carstens 		return -EINVAL;
529*b0c632dbSHeiko Carstens 
530*b0c632dbSHeiko Carstens 	if (mem->guest_phys_addr)
531*b0c632dbSHeiko Carstens 		return -EINVAL;
532*b0c632dbSHeiko Carstens 
533*b0c632dbSHeiko Carstens 	if (mem->userspace_addr & (PAGE_SIZE - 1))
534*b0c632dbSHeiko Carstens 		return -EINVAL;
535*b0c632dbSHeiko Carstens 
536*b0c632dbSHeiko Carstens 	if (mem->memory_size & (PAGE_SIZE - 1))
537*b0c632dbSHeiko Carstens 		return -EINVAL;
538*b0c632dbSHeiko Carstens 
539*b0c632dbSHeiko Carstens 	kvm->arch.guest_origin = mem->userspace_addr;
540*b0c632dbSHeiko Carstens 	kvm->arch.guest_memsize = mem->memory_size;
541*b0c632dbSHeiko Carstens 
542*b0c632dbSHeiko Carstens 	/* FIXME: we do want to interrupt running CPUs and update their memory
543*b0c632dbSHeiko Carstens 	   configuration now to avoid race conditions. But hey, changing the
544*b0c632dbSHeiko Carstens 	   memory layout while virtual CPUs are running is usually bad
545*b0c632dbSHeiko Carstens 	   programming practice. */
546*b0c632dbSHeiko Carstens 
547*b0c632dbSHeiko Carstens 	return 0;
548*b0c632dbSHeiko Carstens }
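
/*
 * Illustrative userspace sketch (not part of this file): a memory layout that
 * satisfies the checks above uses slot 0, guest physical address 0 and a
 * page-aligned, page-sized backing mapping.  ram_size and the mmap() flags
 * are assumptions made for illustration.
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size = ram_size,
 *		.userspace_addr = (__u64) mmap(NULL, ram_size,
 *					       PROT_READ | PROT_WRITE,
 *					       MAP_PRIVATE | MAP_ANONYMOUS,
 *					       -1, 0),
 *	};
 *
 *	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem) < 0)
 *		return -1;
 */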
549*b0c632dbSHeiko Carstens 
550*b0c632dbSHeiko Carstens gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
551*b0c632dbSHeiko Carstens {
552*b0c632dbSHeiko Carstens 	return gfn;
553*b0c632dbSHeiko Carstens }
554*b0c632dbSHeiko Carstens 
555*b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
556*b0c632dbSHeiko Carstens {
557*b0c632dbSHeiko Carstens 	return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
558*b0c632dbSHeiko Carstens }
559*b0c632dbSHeiko Carstens 
560*b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void)
561*b0c632dbSHeiko Carstens {
562*b0c632dbSHeiko Carstens 	kvm_exit();
563*b0c632dbSHeiko Carstens }
564*b0c632dbSHeiko Carstens 
565*b0c632dbSHeiko Carstens module_init(kvm_s390_init);
566*b0c632dbSHeiko Carstens module_exit(kvm_s390_exit);
567