/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};

/* Section: not file related */
void kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
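/*
 * The only device ioctl handled here is KVM_S390_ENABLE_SIE, which
 * prepares the calling process's address space for running guests
 * under SIE. A hypothetical userspace sketch (not part of this file):
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	if (ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0) < 0)
 *		handle_error();
 */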
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

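/*
 * No optional KVM capabilities are advertised yet: every
 * KVM_CHECK_EXTENSION query falls through to the default case and
 * reports the extension as unsupported.
 */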
int kvm_dev_ioctl_check_extension(long ext)
{
	switch (ext) {
	default:
		return 0;
	}
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 * Dirty page logging is not implemented yet; this stub simply
 * reports success.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

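/*
 * KVM_S390_INTERRUPT on the VM file descriptor injects a floating
 * (not vcpu-bound) interrupt. Hedged userspace sketch, assuming a
 * virtio notification with an illustrative parameter value:
 *
 *	struct kvm_s390_interrupt irq = {
 *		.type	= KVM_S390_INT_VIRTIO,
 *		.parm64	= token,
 *	};
 *	ioctl(vm_fd, KVM_S390_INTERRUPT, &irq);
 */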
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -EINVAL;
	}

	return r;
}

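/*
 * VM creation: make sure the current process may run SIE, then
 * allocate the kvm structure, the system control area (SCA) that
 * holds one SIE descriptor slot per vcpu, and a per-VM s390dbf
 * debug area for the VM_EVENT/VCPU_EVENT trace messages.
 */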
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_nokvm;

	rc = -ENOMEM;
	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		goto out_nokvm;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_nosca;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	return kvm;
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_nosca:
	kfree(kvm);
out_nokvm:
	return ERR_PTR(rc);
}

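/*
 * Tear down a single vcpu: detach its SIE block from the SCA slot
 * (if it is still the one registered there), then free the SIE
 * block page and the vcpu structure itself.
 */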
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		(__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	smp_mb();
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_destroy(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

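/*
 * vcpu_load/vcpu_put bracket every stretch of guest execution: on
 * load, save the host floating point and access registers and
 * install the guest copies; on put, do the reverse. The guest FPC
 * is masked with FPC_VALID_MASK so that no invalid control bits
 * reach the hardware.
 */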
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals the initial cpu reset in the Principles of
	 * Operation (POP), except that we don't switch to ESA mode */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix    = 0UL;
	vcpu->arch.sie_block->ihcpu     = 0xffff;
	vcpu->arch.sie_block->cputm     = 0UL;
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

/* The current code can have up to 256 pages for virtio */
#define VIRTIODESCSPACE (256ul * 4096ul)

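/*
 * Program the SIE control block: the vcpu runs in z/Architecture
 * mode, and its guest memory window spans from the registered
 * origin up to the guest memory size plus the VIRTIODESCSPACE that
 * the virtio descriptor pages may occupy above guest memory. The
 * clock comparator hrtimer and the wakeup tasklet drive the
 * idle/wakeup handling for this vcpu.
 */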
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
	vcpu->arch.sie_block->gmslm = vcpu->kvm->arch.guest_memsize +
				      vcpu->kvm->arch.guest_origin +
				      VIRTIODESCSPACE - 1ul;
	vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
	vcpu->arch.sie_block->ecb   = 2;
	vcpu->arch.sie_block->eca   = 0xC1002001U;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

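/*
 * vcpu creation: allocate the vcpu and its SIE block, register the
 * SIE block in the SCA slot for this cpu id, and wire the vcpu's
 * local interrupt structure into the VM-wide floating interrupt
 * structure under the float_int lock.
 */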
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	int rc = -ENOMEM;

	if (!vcpu)
		goto out_nomem;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	else
		BUG_ON(!kvm->vcpus[id]); /* vcpu already exists */
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	/* don't leak the SIE block page if common init fails */
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out_nomem:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_s390_vcpu_initial_reset(vcpu);
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	vcpu_put(vcpu);
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	vcpu_load(vcpu);
	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else
		vcpu->arch.sie_block->gpsw = psw;
	vcpu_put(vcpu);
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

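/*
 * One round of guest execution: deliver any pending interrupts,
 * then enter SIE via sie64a() with guest r14/r15 shadowed in the
 * SIE block (the 16-byte gg14 copy). kvm_guest_enter/exit are
 * called with interrupts disabled for correct accounting. A host
 * fault on the SIE instruction itself is reflected to the guest as
 * an addressing exception.
 */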
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

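/*
 * The KVM_RUN backend: restore state that userspace may have
 * modified in kvm_run, then loop through __vcpu_run() and the
 * intercept handlers until either a signal arrives or an intercept
 * needs userspace attention (-ENOTSUPP: hand the raw SIE intercept
 * data to userspace; -EREMOTE: kvm_run was already prepared by the
 * in-kernel handler).
 */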
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	/* verify that memory has been registered */
	if (!vcpu->kvm->arch.guest_memsize) {
		vcpu_put(vcpu);
		return -EINVAL;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
		vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
		vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
		break;
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	might_sleep();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (signal_pending(current) && !rc)
		rc = -EINTR;

	if (rc == -ENOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.mask     = vcpu->arch.sie_block->gpsw.mask;
		kvm_run->s390_sieic.addr     = vcpu->arch.sie_block->gpsw.addr;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	vcpu->stat.exit_userspace++;
	return rc;
}

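/*
 * Copy data into guest memory: if prefix is set, the destination is
 * translated through the vcpu's prefix register (copy_to_guest);
 * otherwise it is treated as a guest absolute address.
 */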
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	const unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	int rc;

	vcpu_load(vcpu);
	rc = __kvm_s390_vcpu_store_status(vcpu, addr);
	vcpu_put(vcpu);
	return rc;
}

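/*
 * vcpu ioctl dispatcher. Hedged userspace sketch for setting the
 * initial PSW (the mask value is illustrative only: a PSW for
 * 64-bit addressing mode):
 *
 *	psw_t psw = {
 *		.mask = 0x0000000180000000UL,
 *		.addr = start_address,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
 */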
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390int);
	}
	case KVM_S390_STORE_STATUS:
		return kvm_s390_vcpu_store_status(vcpu, arg);
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		if (copy_from_user(&psw, argp, sizeof(psw)))
			return -EFAULT;
		return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
	}
	case KVM_S390_INITIAL_RESET:
		return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
	default:
		;
	}
	return -EINVAL;
}

/* Section: memory related */
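/*
 * Hedged userspace sketch of a KVM_SET_USER_MEMORY_REGION call that
 * satisfies the sanity checks below (guest_mem and mem_size are
 * illustrative, page-aligned values):
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot		= 0,
 *		.guest_phys_addr = 0,
 *		.userspace_addr	= (unsigned long) guest_mem,
 *		.memory_size	= mem_size,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */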
int kvm_arch_set_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	int i;

	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest physical zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
	   The memory in userland is ok to be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	if (mem->slot || kvm->arch.guest_memsize)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & (PAGE_SIZE - 1))
		return -EINVAL;

	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	/* lock all vcpus */
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (!kvm->vcpus[i])
			continue;
		if (!mutex_trylock(&kvm->vcpus[i]->mutex))
			goto fail_out;
	}

	kvm->arch.guest_origin = mem->userspace_addr;
	kvm->arch.guest_memsize = mem->memory_size;

	/* update sie control blocks, and unlock all vcpus */
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm->vcpus[i]->arch.sie_block->gmsor =
				kvm->arch.guest_origin;
			kvm->vcpus[i]->arch.sie_block->gmslm =
				kvm->arch.guest_memsize +
				kvm->arch.guest_origin +
				VIRTIODESCSPACE - 1ul;
			mutex_unlock(&kvm->vcpus[i]->mutex);
		}
	}

	return 0;

fail_out:
	/* only drop the locks we actually took: entry i failed the
	 * trylock, and NULL vcpus were never locked */
	for (--i; i >= 0; i--)
		if (kvm->vcpus[i])
			mutex_unlock(&kvm->vcpus[i]->mutex);
	return -EINVAL;
}


void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

static int __init kvm_s390_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);