/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>

#include "kvm-s390.h"
#include "gaccess.h"

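/*
 * Statistics exported via debugfs: each entry maps a file name to a
 * counter in struct kvm_vcpu, located through the VCPU_STAT() offset
 * helper below.
 */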
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};


/* Section: not file related */
void kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
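/*
 * The only device ioctl: s390_enable_sie() prepares the page tables of
 * the calling process for use with the SIE instruction.
 */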
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	switch (ext) {
	case KVM_CAP_USER_MEMORY:
		return 1;
	default:
		return 0;
	}
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
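	/* dirty page logging is not implemented on s390 yet */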
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -EINVAL;
	}

	return r;
}

struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_nokvm;

	rc = -ENOMEM;
	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		goto out_nokvm;

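	/*
	 * The SCA (system control area) is shared by all vcpus of this VM;
	 * each vcpu's SIE control block is entered into it on vcpu creation.
	 */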
	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_nosca;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	try_module_get(THIS_MODULE);

	return kvm;
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_nosca:
	kfree(kvm);
out_nokvm:
	return ERR_PTR(rc);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	debug_unregister(kvm->arch.dbf);
	kvm_free_physmem(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	kfree(kvm);
	module_put(THIS_MODULE);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but doesn't call it */
	BUG();
}

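/*
 * Context switch of floating point and access registers: save the host
 * copies and install the guest copies on load; reverse the swap on put.
 */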
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals the initial cpu reset in the PoP, but we don't
	 * switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix    = 0UL;
	vcpu->arch.sie_block->ihcpu     = 0xffff;
	vcpu->arch.sie_block->cputm     = 0UL;
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
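	/* architected reset values for CR0 and CR14 */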
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

/* The current code can have up to 256 pages for virtio */
#define VIRTIODESCSPACE (256ul * 4096ul)

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
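	/*
	 * Guest storage runs from gmsor (origin) to gmslm (limit); the
	 * extra VIRTIODESCSPACE above guest memory is reserved for virtio
	 * descriptor pages.
	 */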
	vcpu->arch.sie_block->gmslm = vcpu->kvm->arch.guest_memsize +
				      vcpu->kvm->arch.guest_origin +
				      VIRTIODESCSPACE - 1ul;
	vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
	vcpu->arch.sie_block->ecb   = 2;
	vcpu->arch.sie_block->eca   = 0xC1002001U;
	setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
		 (unsigned long) vcpu);
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xfe;
	return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	int rc = -ENOMEM;

	if (!vcpu)
		goto out_nomem;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	BUG_ON(kvm->arch.sca->cpu[id].sda);
	kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
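	/* the SIE block keeps the SCA origin as separate high and low words */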
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock_bh(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock_bh(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	try_module_get(THIS_MODULE);

	return vcpu;
out_free_sie_block:
	/* don't leak the sie block page on a late failure */
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out_nomem:
	return ERR_PTR(rc);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "destroy cpu");
	free_page((unsigned long)(vcpu->arch.sie_block));
	kfree(vcpu);
	module_put(THIS_MODULE);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_s390_vcpu_initial_reset(vcpu);
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	vcpu_put(vcpu);
	return 0;
}

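/* the initial PSW can only be set while the vcpu is stopped */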
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	vcpu_load(vcpu);
	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else
		vcpu->arch.sie_block->gpsw = psw;
	vcpu_put(vcpu);
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				    struct kvm_debug_guest *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

extern void s390_handle_mcck(void);

static void __vcpu_run(struct kvm_vcpu *vcpu)
{
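	/* gprs 14 and 15 are shadowed in the SIE block while the guest runs */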
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

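	/* a SIEIC exit round-trips the PSW through userspace; take back the
	 * possibly modified values before re-entering the guest */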
	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
		vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
		vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
		break;
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	might_sleep();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (signal_pending(current) && !rc)
		rc = -EINTR;

	if (rc == -ENOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.mask     = vcpu->arch.sie_block->gpsw.mask;
		kvm_run->s390_sieic.addr     = vcpu->arch.sie_block->gpsw.addr;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	vcpu->stat.exit_userspace++;
	return rc;
}

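/*
 * Copy into guest storage: through the prefix-aware mapping when 'prefix'
 * is set, into absolute storage otherwise.
 */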
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	const unsigned char archmode = 1;
	int prefix;

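	/* the architectural-mode byte (1 == z/Architecture) lives at
	 * address 163 */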
	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	int rc;

	vcpu_load(vcpu);
	rc = __kvm_s390_vcpu_store_status(vcpu, addr);
	vcpu_put(vcpu);
	return rc;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390int);
	}
	case KVM_S390_STORE_STATUS:
		return kvm_s390_vcpu_store_status(vcpu, arg);
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		if (copy_from_user(&psw, argp, sizeof(psw)))
			return -EFAULT;
		return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
	}
	case KVM_S390_INITIAL_RESET:
		return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
	default:
		break;
	}
	return -EINVAL;
}

/* Section: memory related */
int kvm_arch_set_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot, which
	   has to start at guest physical zero, begin at a page boundary in
	   userland and end at a page boundary. The userland memory may be
	   fragmented into various different vmas. It is okay to mmap() and
	   munmap() in this slot at any time after this call. */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & (PAGE_SIZE - 1))
		return -EINVAL;

	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;

	kvm->arch.guest_origin = mem->userspace_addr;
	kvm->arch.guest_memsize = mem->memory_size;

	/* FIXME: we do want to interrupt running CPUs and update their memory
	   configuration now to avoid race conditions. But hey, changing the
	   memory layout while virtual CPUs are running is usually bad
	   programming practice. */

	return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

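/* s390 has no gfn aliasing, so the identity mapping is correct */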
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

static int __init kvm_s390_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);