/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/etr.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

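/* VCPU event counters, exported by the common KVM code via debugfs. */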
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
	0xffe6fffbfcfdfc40UL,
	0x005e800000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

static struct gmap_notifier gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = sclp.has_esca ? KVM_S390_ESCA_CPU_SLOTS
				  : KVM_S390_BSCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	default:
		r = 0;
	}
	return r;
}
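
/*
 * Illustrative userspace sketch (not part of this file, error handling
 * omitted): the capabilities above are probed with KVM_CHECK_EXTENSION,
 * e.g.
 *
 *	int max = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_MEM_OP);
 *	// max > 0 (MEM_OP_MAX_SIZE) iff KVM_S390_MEM_OP is supported
 */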

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
					struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages; last_gfn is the first gfn past the slot */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn < last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}
288 
289 /* Section: vm related */
290 static void sca_del_vcpu(struct kvm_vcpu *vcpu);
291 
292 /*
293  * Get (and clear) the dirty memory log for a memory slot.
294  */
295 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
296 			       struct kvm_dirty_log *log)
297 {
298 	int r;
299 	unsigned long n;
300 	struct kvm_memslots *slots;
301 	struct kvm_memory_slot *memslot;
302 	int is_dirty = 0;
303 
304 	mutex_lock(&kvm->slots_lock);
305 
306 	r = -EINVAL;
307 	if (log->slot >= KVM_USER_MEM_SLOTS)
308 		goto out;
309 
310 	slots = kvm_memslots(kvm);
311 	memslot = id_to_memslot(slots, log->slot);
312 	r = -ENOENT;
313 	if (!memslot->dirty_bitmap)
314 		goto out;
315 
316 	kvm_s390_sync_dirty_log(kvm, memslot);
317 	r = kvm_get_dirty_log(kvm, log, &is_dirty);
318 	if (r)
319 		goto out;
320 
321 	/* Clear the dirty log */
322 	if (is_dirty) {
323 		n = kvm_dirty_bitmap_bytes(memslot);
324 		memset(memslot->dirty_bitmap, 0, n);
325 	}
326 	r = 0;
327 out:
328 	mutex_unlock(&kvm->slots_lock);
329 	return r;
330 }
331 
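/*
 * Enable a VM-wide capability (KVM_ENABLE_CAP on the VM fd). Capabilities
 * that extend the facility mask/list may only be enabled before any VCPU
 * has been created.
 */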
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac->mask, 129);
			set_kvm_facility(kvm->arch.model.fac->list, 129);
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac->mask, 64);
			set_kvm_facility(kvm->arch.model.fac->list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		/* enable CMMA only for z10 and later (EDAT_1) */
		ret = -EINVAL;
		if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_alloc takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
					   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
					 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
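
/*
 * Illustrative userspace sketch (not part of this file): the TOD attributes
 * above are reached via KVM_SET_DEVICE_ATTR/KVM_GET_DEVICE_ATTR on the VM
 * fd, e.g. to set the guest TOD base:
 *
 *	__u64 tod = ...;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_TOD,
 *		.attr  = KVM_S390_VM_TOD_LOW,
 *		.addr  = (__u64)&tod,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */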

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
		       sizeof(struct cpuid));
		kvm->arch.model.ibc = proc->ibc;
		memcpy(kvm->arch.model.fac->list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

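/*
 * Copy the guest's storage keys for args->count pages starting at
 * args->start_gfn to the user buffer at args->skeydata_addr; returns
 * KVM_S390_GET_SKEYS_NONE when the guest does not use storage keys.
 */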
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	unsigned long curkey;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		curkey = get_guest_storage_key(current->mm, hva);
		if (IS_ERR_VALUE(curkey)) {
			r = curkey;
			goto out;
		}
		keys[i] = curkey;
	}

	r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
			 sizeof(uint8_t) * args->count);
	if (r)
		r = -EFAULT;
out:
	kvfree(keys);
	return r;
}

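/*
 * Install user-provided storage keys for args->count pages starting at
 * args->start_gfn, enabling storage-key handling for the guest first if
 * necessary. The lowest-order key bit is reserved and must be zero.
 */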
static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			goto out;
		}

		r = set_guest_storage_key(current->mm, hva,
					  (unsigned long)keys[i], 0);
		if (r)
			goto out;
	}
out:
	kvfree(keys);
	return r;
}
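
/*
 * Illustrative userspace sketch (not part of this file, error handling
 * omitted): reading 16 storage keys starting at guest frame 0 via the
 * KVM_S390_GET_SKEYS vm ioctl:
 *
 *	__u8 keys[16];
 *	struct kvm_s390_skeys args = {
 *		.start_gfn	= 0,
 *		.count		= 16,
 *		.skeydata_addr	= (__u64)keys,
 *	};
 *	ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
 */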

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

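/*
 * Query the AP configuration information block: function code 4 (QCI) goes
 * into GR0 and the address of a 128-byte buffer into GR2, then PQAP is
 * issued (opcode 0xb2af). Returns the condition code; on cc 0 the buffer
 * has been filled.
 */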
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

	return 0;
}

static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}

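/*
 * Set up the architecture-specific parts of a new VM: SCA, debug feature,
 * facility mask/list, CPU id/ibc, crypto control block, floating interrupt
 * lists and, unless this is a ucontrol VM, the guest address space (gmap).
 */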
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.use_esca = 0; /* start with basic SCA */
	rwlock_init(&kvm->arch.sca_lock);
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
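	/*
	 * Stagger each VM's basic SCA within its page in 16-byte steps,
	 * presumably to spread the SCAs of different VMs across cache
	 * lines (an assumption; the offset simply wraps within one page).
	 */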
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	/*
	 * The architectural maximum amount of facilities is 16 kbit. To store
	 * this amount, 2 kbyte of memory is required. Thus we need a full
	 * page to hold the guest facility list (arch.model.fac->list) and the
	 * facility mask (arch.model.fac->mask). Its address size has to be
	 * 31 bits and word aligned.
	 */
	kvm->arch.model.fac =
		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.model.fac)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac->mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_err;

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_MAX_SIZE;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_alloc(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	kfree(kvm->arch.crypto.crycb);
	free_page((unsigned long)kvm->arch.model.fac);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)kvm->arch.model.fac);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}

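/*
 * Replace the basic SCA with an extended SCA while the VM is running:
 * all VCPUs are blocked and kicked out of SIE, the entries are copied
 * under the SCA write lock, and every SIE control block is repointed to
 * the new block before the VCPUs are let loose again.
 */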
static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}

static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	if (test_kvm_facility(vcpu->kvm, 129))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* Save host register state */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;

	/*
	 * Depending on MACHINE_HAS_VX, data stored to vrs either
	 * has vector register or floating point register format.
	 */
	current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;

	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);

	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;

	/* Restore host register state */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;

	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm     = 0UL;
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.cpu_id = model->cpu_id;
	vcpu->arch.sie_block->ibc = model->ibc;
	vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
	else if (test_kvm_facility(vcpu->kvm, 8))
		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

	kvm_s390_vcpu_setup_model(vcpu);

	vcpu->arch.sie_block->ecb   = 6;
	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2  = 8;
	vcpu->arch.sie_block->eca   = 0xC1002000U;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= 1;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= 0x10000000U;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->arch.sie_block->ecb3 |= 0x01;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= 0x00020000;
		vcpu->arch.sie_block->ecd |= 0x20000000;
	}
	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
1802 
1803 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
1804 {
1805 	kvm_s390_vcpu_initial_reset(vcpu);
1806 	return 0;
1807 }
1808 
1809 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1810 {
1811 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
1812 	return 0;
1813 }
1814 
1815 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1816 {
1817 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
1818 	return 0;
1819 }
1820 
1821 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1822 				  struct kvm_sregs *sregs)
1823 {
1824 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
1825 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
1826 	restore_access_regs(vcpu->run->s.regs.acrs);
1827 	return 0;
1828 }
1829 
1830 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1831 				  struct kvm_sregs *sregs)
1832 {
1833 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
1834 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
1835 	return 0;
1836 }
1837 
1838 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1839 {
1840 	/* make sure the new values will be lazily loaded */
1841 	save_fpu_regs();
1842 	if (test_fp_ctl(fpu->fpc))
1843 		return -EINVAL;
1844 	current->thread.fpu.fpc = fpu->fpc;
1845 	if (MACHINE_HAS_VX)
1846 		convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
1847 	else
1848 		memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
1849 	return 0;
1850 }
1851 
1852 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1853 {
1854 	/* make sure we have the latest values */
1855 	save_fpu_regs();
1856 	if (MACHINE_HAS_VX)
1857 		convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
1858 	else
1859 		memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
1860 	fpu->fpc = current->thread.fpu.fpc;
1861 	return 0;
1862 }
1863 
1864 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1865 {
1866 	int rc = 0;
1867 
1868 	if (!is_vcpu_stopped(vcpu))
1869 		rc = -EBUSY;
1870 	else {
1871 		vcpu->run->psw_mask = psw.mask;
1872 		vcpu->run->psw_addr = psw.addr;
1873 	}
1874 	return rc;
1875 }
1876 
1877 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1878 				  struct kvm_translation *tr)
1879 {
1880 	return -EINVAL; /* not implemented yet */
1881 }
1882 
1883 #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
1884 			      KVM_GUESTDBG_USE_HW_BP | \
1885 			      KVM_GUESTDBG_ENABLE)
1886 
1887 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1888 					struct kvm_guest_debug *dbg)
1889 {
1890 	int rc = 0;
1891 
1892 	vcpu->guest_debug = 0;
1893 	kvm_s390_clear_bp_data(vcpu);
1894 
1895 	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
1896 		return -EINVAL;
1897 
1898 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
1899 		vcpu->guest_debug = dbg->control;
1900 		/* enforce guest PER */
1901 		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1902 
1903 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
1904 			rc = kvm_s390_import_bp_data(vcpu, dbg);
1905 	} else {
1906 		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1907 		vcpu->arch.guestdbg.last_bp = 0;
1908 	}
1909 
1910 	if (rc) {
1911 		vcpu->guest_debug = 0;
1912 		kvm_s390_clear_bp_data(vcpu);
1913 		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1914 	}
1915 
1916 	return rc;
1917 }
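
/*
 * Hypothetical userspace sketch (not part of this file) showing how
 * the ioctl above is typically reached, e.g. to enable hardware
 * single-stepping:
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg) < 0)
 *		perror("KVM_SET_GUEST_DEBUG");
 *
 * Control bits outside VALID_GUESTDBG_FLAGS are rejected with -EINVAL.
 */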
1918 
1919 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1920 				    struct kvm_mp_state *mp_state)
1921 {
1922 	/* CHECK_STOP and LOAD are not supported yet */
1923 	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
1924 				       KVM_MP_STATE_OPERATING;
1925 }
1926 
1927 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1928 				    struct kvm_mp_state *mp_state)
1929 {
1930 	int rc = 0;
1931 
1932 	/* user space knows about this interface - let it control the state */
1933 	vcpu->kvm->arch.user_cpu_state_ctrl = 1;
1934 
1935 	switch (mp_state->mp_state) {
1936 	case KVM_MP_STATE_STOPPED:
1937 		kvm_s390_vcpu_stop(vcpu);
1938 		break;
1939 	case KVM_MP_STATE_OPERATING:
1940 		kvm_s390_vcpu_start(vcpu);
1941 		break;
1942 	case KVM_MP_STATE_LOAD:
1943 	case KVM_MP_STATE_CHECK_STOP:
1944 		/* fall through - CHECK_STOP and LOAD are not supported yet */
1945 	default:
1946 		rc = -ENXIO;
1947 	}
1948 
1949 	return rc;
1950 }
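
/*
 * Hypothetical userspace sketch (not part of this file): once user
 * space has opted into controlling the CPU state, stopping a VCPU is
 * simply
 *
 *	struct kvm_mp_state state = { .mp_state = KVM_MP_STATE_STOPPED };
 *
 *	ioctl(vcpu_fd, KVM_SET_MP_STATE, &state);
 *
 * Requesting CHECK_STOP or LOAD fails with -ENXIO until they are
 * implemented.
 */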
1951 
1952 static bool ibs_enabled(struct kvm_vcpu *vcpu)
1953 {
1954 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
1955 }
1956 
1957 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
1958 {
1959 retry:
1960 	kvm_s390_vcpu_request_handled(vcpu);
1961 	if (!vcpu->requests)
1962 		return 0;
1963 	/*
1964 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
1965 	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
1966 	 * This ensures that the ipte instruction for this request has
1967 	 * already finished. We might race against a second unmapper that
1968 	 * wants to set the blocking bit. Let's just retry the request loop.
1969 	 */
1970 	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
1971 		int rc;
1972 		rc = gmap_ipte_notify(vcpu->arch.gmap,
1973 				      kvm_s390_get_prefix(vcpu),
1974 				      PAGE_SIZE * 2);
1975 		if (rc)
1976 			return rc;
1977 		goto retry;
1978 	}
1979 
1980 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
1981 		vcpu->arch.sie_block->ihcpu = 0xffff;
1982 		goto retry;
1983 	}
1984 
1985 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
1986 		if (!ibs_enabled(vcpu)) {
1987 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
1988 			atomic_or(CPUSTAT_IBS,
1989 					&vcpu->arch.sie_block->cpuflags);
1990 		}
1991 		goto retry;
1992 	}
1993 
1994 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
1995 		if (ibs_enabled(vcpu)) {
1996 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
1997 			atomic_andnot(CPUSTAT_IBS,
1998 					  &vcpu->arch.sie_block->cpuflags);
1999 		}
2000 		goto retry;
2001 	}
2002 
2003 	/* nothing to do, just clear the request */
2004 	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
2005 
2006 	return 0;
2007 }
2008 
2009 void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
2010 {
2011 	struct kvm_vcpu *vcpu;
2012 	int i;
2013 
2014 	mutex_lock(&kvm->lock);
2015 	preempt_disable();
2016 	kvm->arch.epoch = tod - get_tod_clock();
2017 	kvm_s390_vcpu_block_all(kvm);
2018 	kvm_for_each_vcpu(i, vcpu, kvm)
2019 		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2020 	kvm_s390_vcpu_unblock_all(kvm);
2021 	preempt_enable();
2022 	mutex_unlock(&kvm->lock);
2023 }
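
/*
 * The guest TOD is modelled as an offset: while in SIE, the guest sees
 * the host TOD clock plus sie_block->epoch. Worked example with
 * made-up values: for a host TOD of 0x1000 and a requested guest TOD
 * of 0x1800, the code above stores an epoch of 0x800, so guest reads
 * see 0x1000 + 0x800 = 0x1800. Blocking all VCPUs while the per-vcpu
 * copies are updated keeps the clock consistent across guest CPUs.
 */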
2024 
2025 /**
2026  * kvm_arch_fault_in_page - fault-in guest page if necessary
2027  * @vcpu: The corresponding virtual cpu
2028  * @gpa: Guest physical address
2029  * @writable: Whether the page should be writable or not
2030  *
2031  * Make sure that a guest page has been faulted-in on the host.
2032  *
2033  * Return: Zero on success, negative error code otherwise.
2034  */
2035 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
2036 {
2037 	return gmap_fault(vcpu->arch.gmap, gpa,
2038 			  writable ? FAULT_FLAG_WRITE : 0);
2039 }
2040 
2041 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
2042 				      unsigned long token)
2043 {
2044 	struct kvm_s390_interrupt inti;
2045 	struct kvm_s390_irq irq;
2046 
2047 	if (start_token) {
2048 		irq.u.ext.ext_params2 = token;
2049 		irq.type = KVM_S390_INT_PFAULT_INIT;
2050 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
2051 	} else {
2052 		inti.type = KVM_S390_INT_PFAULT_DONE;
2053 		inti.parm64 = token;
2054 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
2055 	}
2056 }
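
/*
 * The two halves of the pfault protocol differ in scope: the INIT
 * notification is injected into the VCPU that hit the fault, while
 * DONE is injected as a floating interrupt on the VM, since the
 * faulting VCPU may not be runnable by the time the page arrives. The
 * token lets the guest match a DONE to the INIT it saw earlier.
 */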
2057 
2058 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
2059 				     struct kvm_async_pf *work)
2060 {
2061 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
2062 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
2063 }
2064 
2065 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
2066 				 struct kvm_async_pf *work)
2067 {
2068 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
2069 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
2070 }
2071 
2072 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
2073 			       struct kvm_async_pf *work)
2074 {
2075 	/* s390 will always inject the page directly */
2076 }
2077 
2078 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
2079 {
2080 	/*
2081 	 * s390 will always inject the page directly,
2082 	 * but we still want check_async_completion to clean up
2083 	 */
2084 	return true;
2085 }
2086 
2087 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
2088 {
2089 	hva_t hva;
2090 	struct kvm_arch_async_pf arch;
2091 	int rc;
2092 
2093 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2094 		return 0;
2095 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
2096 	    vcpu->arch.pfault_compare)
2097 		return 0;
2098 	if (psw_extint_disabled(vcpu))
2099 		return 0;
2100 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
2101 		return 0;
2102 	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
2103 		return 0;
2104 	if (!vcpu->arch.gmap->pfault_enabled)
2105 		return 0;
2106 
2107 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
2108 	hva += current->thread.gmap_addr & ~PAGE_MASK;
2109 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
2110 		return 0;
2111 
2112 	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
2113 	return rc;
2114 }
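
/*
 * Summary of the bail-out conditions above: an async pfault is only
 * set up if the guest has registered a valid token address, the
 * current PSW matches the guest-defined select/compare mask, external
 * interrupts (including the subclass enabled via the 0x200ul bit in
 * CR0) can be delivered, no other interrupt is already pending, and
 * pfault handling was enabled on the gmap. Returning 0 in all other
 * cases makes the caller fall back to a synchronous fault-in.
 */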
2115 
2116 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
2117 {
2118 	int rc, cpuflags;
2119 
2120 	/*
2121 	 * On s390, notifications for arriving pages are delivered directly
2122 	 * to the guest, but the housekeeping for completed pfaults is
2123 	 * handled outside the worker.
2124 	 */
2125 	kvm_check_async_pf_completion(vcpu);
2126 
2127 	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
2128 	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
2129 
2130 	if (need_resched())
2131 		schedule();
2132 
2133 	if (test_cpu_flag(CIF_MCCK_PENDING))
2134 		s390_handle_mcck();
2135 
2136 	if (!kvm_is_ucontrol(vcpu->kvm)) {
2137 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
2138 		if (rc)
2139 			return rc;
2140 	}
2141 
2142 	rc = kvm_s390_handle_requests(vcpu);
2143 	if (rc)
2144 		return rc;
2145 
2146 	if (guestdbg_enabled(vcpu)) {
2147 		kvm_s390_backup_guest_per_regs(vcpu);
2148 		kvm_s390_patch_guest_per_regs(vcpu);
2149 	}
2150 
2151 	vcpu->arch.sie_block->icptcode = 0;
2152 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
2153 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
2154 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
2155 
2156 	return 0;
2157 }
2158 
2159 static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
2160 {
2161 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
2162 	u8 opcode;
2163 	int rc;
2164 
2165 	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
2166 	trace_kvm_s390_sie_fault(vcpu);
2167 
2168 	/*
2169 	 * We want to inject an addressing exception, which is defined as a
2170 	 * suppressing or terminating exception. However, since we came here
2171 	 * by a DAT access exception, the PSW still points to the faulting
2172 	 * instruction since DAT exceptions are nullifying. So we've got
2173 	 * to look up the current opcode to get the length of the instruction
2174 	 * to be able to forward the PSW.
2175 	 */
2176 	rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
2177 	if (rc)
2178 		return kvm_s390_inject_prog_cond(vcpu, rc);
2179 	psw->addr = __rewind_psw(*psw, -insn_length(opcode));
2180 
2181 	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
2182 }
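
/*
 * Worked example for the rewind above: if the guest faulted on a
 * 4-byte instruction such as STCK (first opcode byte 0xb2),
 * insn_length() yields 4 and __rewind_psw(*psw, -4) advances the PSW
 * by four bytes, past the faulting instruction. This converts the
 * nullifying DAT exception we arrived with into the suppressing
 * semantics that an addressing exception requires.
 */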
2183 
2184 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
2185 {
2186 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
2187 		   vcpu->arch.sie_block->icptcode);
2188 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
2189 
2190 	if (guestdbg_enabled(vcpu))
2191 		kvm_s390_restore_guest_per_regs(vcpu);
2192 
2193 	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
2194 	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
2195 
2196 	if (vcpu->arch.sie_block->icptcode > 0) {
2197 		int rc = kvm_handle_sie_intercept(vcpu);
2198 
2199 		if (rc != -EOPNOTSUPP)
2200 			return rc;
2201 		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
2202 		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
2203 		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
2204 		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
2205 		return -EREMOTE;
2206 	} else if (exit_reason != -EFAULT) {
2207 		vcpu->stat.exit_null++;
2208 		return 0;
2209 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
2210 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2211 		vcpu->run->s390_ucontrol.trans_exc_code =
2212 						current->thread.gmap_addr;
2213 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
2214 		return -EREMOTE;
2215 	} else if (current->thread.gmap_pfault) {
2216 		trace_kvm_s390_major_guest_pfault(vcpu);
2217 		current->thread.gmap_pfault = 0;
2218 		if (kvm_arch_setup_async_pf(vcpu))
2219 			return 0;
2220 		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
2221 	}
2222 	return vcpu_post_run_fault_in_sie(vcpu);
2223 }
2224 
2225 static int __vcpu_run(struct kvm_vcpu *vcpu)
2226 {
2227 	int rc, exit_reason;
2228 
2229 	/*
2230 	 * We try to hold kvm->srcu during most of vcpu_run (except when
2231 	 * running the guest), so that memslots (and other stuff) are protected
2232 	 */
2233 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2234 
2235 	do {
2236 		rc = vcpu_pre_run(vcpu);
2237 		if (rc)
2238 			break;
2239 
2240 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
2241 		/*
2242 		 * As PF_VCPU will be used in the fault handler, there
2243 		 * must be no uaccess between guest_enter and guest_exit.
2244 		 */
2245 		local_irq_disable();
2246 		__kvm_guest_enter();
2247 		local_irq_enable();
2248 		exit_reason = sie64a(vcpu->arch.sie_block,
2249 				     vcpu->run->s.regs.gprs);
2250 		local_irq_disable();
2251 		__kvm_guest_exit();
2252 		local_irq_enable();
2253 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2254 
2255 		rc = vcpu_post_run(vcpu, exit_reason);
2256 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
2257 
2258 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
2259 	return rc;
2260 }
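
/*
 * Dropping kvm->srcu around sie64a() above is deliberate: while the
 * guest runs under SIE this thread performs no memslot lookups, and
 * keeping the SRCU read side held across a potentially long guest run
 * would stall writers in synchronize_srcu(), e.g. memslot updates.
 */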
2261 
2262 static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2263 {
2264 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2265 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2266 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2267 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2268 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2269 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
2270 		/* some control register changes require a tlb flush */
2271 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2272 	}
2273 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
2274 		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
2275 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2276 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2277 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2278 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2279 	}
2280 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2281 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2282 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2283 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
2284 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2285 			kvm_clear_async_pf_completion_queue(vcpu);
2286 	}
2287 	kvm_run->kvm_dirty_regs = 0;
2288 }
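
/*
 * Hypothetical userspace sketch (not part of this file) of the
 * dirty-regs protocol handled by sync_regs()/store_regs(): before
 * KVM_RUN, userspace edits the shared kvm_run area and flags what it
 * changed:
 *
 *	run->s.regs.prefix = new_prefix;
 *	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *
 * sync_regs() folds the flagged blocks into the SIE block and clears
 * kvm_dirty_regs; store_regs() copies everything back out after the
 * run, so userspace always reads fresh values.
 */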
2289 
2290 static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2291 {
2292 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2293 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2294 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2295 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
2296 	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
2297 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2298 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2299 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2300 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2301 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2302 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2303 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2304 }
2305 
2306 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2307 {
2308 	int rc;
2309 	sigset_t sigsaved;
2310 
2311 	if (guestdbg_exit_pending(vcpu)) {
2312 		kvm_s390_prepare_debug_exit(vcpu);
2313 		return 0;
2314 	}
2315 
2316 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2317 		kvm_s390_vcpu_start(vcpu);
2318 	} else if (is_vcpu_stopped(vcpu)) {
2319 		pr_err_ratelimited("can't run stopped vcpu %d\n",
2320 				   vcpu->vcpu_id);
2321 		return -EINVAL;
2322 	}
2323 
2324 	if (vcpu->sigset_active)
2325 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2326 
2327 	sync_regs(vcpu, kvm_run);
2328 
2329 	might_fault();
2330 	rc = __vcpu_run(vcpu);
2331 
2332 	if (signal_pending(current) && !rc) {
2333 		kvm_run->exit_reason = KVM_EXIT_INTR;
2334 		rc = -EINTR;
2335 	}
2336 
2337 	if (guestdbg_exit_pending(vcpu) && !rc)  {
2338 		kvm_s390_prepare_debug_exit(vcpu);
2339 		rc = 0;
2340 	}
2341 
2342 	if (rc == -EREMOTE) {
2343 		/* userspace support is needed, kvm_run has been prepared */
2344 		rc = 0;
2345 	}
2346 
2347 	store_regs(vcpu, kvm_run);
2348 
2349 	if (vcpu->sigset_active)
2350 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2351 
2352 	vcpu->stat.exit_userspace++;
2353 	return rc;
2354 }
2355 
2356 /*
2357  * store status at address
2358  * we have two special cases:
2359  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64-bit
2360  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2361  */
2362 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
2363 {
2364 	unsigned char archmode = 1;
2365 	freg_t fprs[NUM_FPRS];
2366 	unsigned int px;
2367 	u64 clkcomp;
2368 	int rc;
2369 
2370 	px = kvm_s390_get_prefix(vcpu);
2371 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2372 		if (write_guest_abs(vcpu, 163, &archmode, 1))
2373 			return -EFAULT;
2374 		gpa = 0;
2375 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2376 		if (write_guest_real(vcpu, 163, &archmode, 1))
2377 			return -EFAULT;
2378 		gpa = px;
2379 	} else
2380 		gpa -= __LC_FPREGS_SAVE_AREA;
2381 
2382 	/* manually convert vector registers if necessary */
2383 	if (MACHINE_HAS_VX) {
2384 		convert_vx_to_fp(fprs, current->thread.fpu.vxrs);
2385 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
2386 				     fprs, 128);
2387 	} else {
2388 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
2389 				     vcpu->run->s.regs.vrs, 128);
2390 	}
2391 	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
2392 			      vcpu->run->s.regs.gprs, 128);
2393 	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
2394 			      &vcpu->arch.sie_block->gpsw, 16);
2395 	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
2396 			      &px, 4);
2397 	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
2398 			      &vcpu->run->s.regs.fpc, 4);
2399 	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
2400 			      &vcpu->arch.sie_block->todpr, 4);
2401 	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
2402 			      &vcpu->arch.sie_block->cputm, 8);
2403 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
2404 	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
2405 			      &clkcomp, 8);
2406 	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
2407 			      &vcpu->run->s.regs.acrs, 64);
2408 	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
2409 			      &vcpu->arch.sie_block->gcr, 128);
2410 	return rc ? -EFAULT : 0;
2411 }
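
/*
 * Note on clkcomp above: the save area keeps the clock comparator in a
 * shifted format, so the value is stored without its rightmost byte
 * and bits 0-55 end up in bits 8-63 of the saved doubleword; hence the
 * ">> 8" before writing it out.
 */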
2412 
2413 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2414 {
2415 	/*
2416 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2417 	 * copying in vcpu load/put. Let's update our copies before we save
2418 	 * them into the save area.
2419 	 */
2420 	save_fpu_regs();
2421 	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
2422 	save_access_regs(vcpu->run->s.regs.acrs);
2423 
2424 	return kvm_s390_store_status_unloaded(vcpu, addr);
2425 }
2426 
2427 /*
2428  * store additional status at address
2429  */
2430 int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2431 					unsigned long gpa)
2432 {
2433 	/* Only bits 0-53 are used for address formation */
2434 	if (!(gpa & ~0x3ff))
2435 		return 0;
2436 
2437 	return write_guest_abs(vcpu, gpa & ~0x3ff,
2438 			       (void *)&vcpu->run->s.regs.vrs, 512);
2439 }
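
/*
 * The additional-status area (here: the 512 bytes of vector registers)
 * is stored at a 1K-aligned address, which is why the low ten bits of
 * gpa are masked off above. An effective address of 0 means "no
 * additional status requested", so nothing is stored in that case.
 */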
2440 
2441 int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2442 {
2443 	if (!test_kvm_facility(vcpu->kvm, 129))
2444 		return 0;
2445 
2446 	/*
2447 	 * The guest VXRS are in the host VXRS due to the lazy
2448 	 * copying in vcpu load/put. We can simply call save_fpu_regs()
2449 	 * to save the current register state because we are in the
2450 	 * middle of a load/put cycle.
2451 	 *
2452 	 * Let's update our copies before we save them into the save area.
2453 	 */
2454 	save_fpu_regs();
2455 
2456 	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2457 }
2458 
2459 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2460 {
2461 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
2462 	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
2463 }
2464 
2465 static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2466 {
2467 	unsigned int i;
2468 	struct kvm_vcpu *vcpu;
2469 
2470 	kvm_for_each_vcpu(i, vcpu, kvm) {
2471 		__disable_ibs_on_vcpu(vcpu);
2472 	}
2473 }
2474 
2475 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2476 {
2477 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
2478 	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
2479 }
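
/*
 * IBS is a performance facility that is only valid while at most one
 * VCPU of the guest is running. The start/stop code below maintains
 * that invariant: the last remaining running VCPU gets IBS enabled,
 * and IBS is disabled on all VCPUs as soon as a second one starts. The
 * check/sync request pairs in the helpers above ensure that a stale
 * request of the opposite kind cannot survive such a transition.
 */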
2480 
2481 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2482 {
2483 	int i, online_vcpus, started_vcpus = 0;
2484 
2485 	if (!is_vcpu_stopped(vcpu))
2486 		return;
2487 
2488 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
2489 	/* Only one cpu at a time may enter/leave the STOPPED state. */
2490 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
2491 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2492 
2493 	for (i = 0; i < online_vcpus; i++) {
2494 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
2495 			started_vcpus++;
2496 	}
2497 
2498 	if (started_vcpus == 0) {
2499 		/* we're the only active VCPU -> speed it up */
2500 		__enable_ibs_on_vcpu(vcpu);
2501 	} else if (started_vcpus == 1) {
2502 		/*
2503 		 * As we are starting a second VCPU, we have to disable
2504 		 * the IBS facility on all VCPUs to remove potentially
2505 	 * outstanding ENABLE requests.
2506 		 */
2507 		__disable_ibs_on_all_vcpus(vcpu->kvm);
2508 	}
2509 
2510 	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
2511 	/*
2512 	 * Another VCPU might have used IBS while we were offline.
2513 	 * Let's play safe and flush the VCPU at startup.
2514 	 */
2515 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2516 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
2517 	return;
2518 }
2519 
2520 void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2521 {
2522 	int i, online_vcpus, started_vcpus = 0;
2523 	struct kvm_vcpu *started_vcpu = NULL;
2524 
2525 	if (is_vcpu_stopped(vcpu))
2526 		return;
2527 
2528 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
2529 	/* Only one cpu at a time may enter/leave the STOPPED state. */
2530 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
2531 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2532 
2533 	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
2534 	kvm_s390_clear_stop_irq(vcpu);
2535 
2536 	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
2537 	__disable_ibs_on_vcpu(vcpu);
2538 
2539 	for (i = 0; i < online_vcpus; i++) {
2540 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
2541 			started_vcpus++;
2542 			started_vcpu = vcpu->kvm->vcpus[i];
2543 		}
2544 	}
2545 
2546 	if (started_vcpus == 1) {
2547 		/*
2548 		 * As we only have one VCPU left, we want to enable the
2549 		 * IBS facility for that VCPU to speed it up.
2550 		 */
2551 		__enable_ibs_on_vcpu(started_vcpu);
2552 	}
2553 
2554 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
2555 	return;
2556 }
2557 
2558 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2559 				     struct kvm_enable_cap *cap)
2560 {
2561 	int r;
2562 
2563 	if (cap->flags)
2564 		return -EINVAL;
2565 
2566 	switch (cap->cap) {
2567 	case KVM_CAP_S390_CSS_SUPPORT:
2568 		if (!vcpu->kvm->arch.css_support) {
2569 			vcpu->kvm->arch.css_support = 1;
2570 			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
2571 			trace_kvm_s390_enable_css(vcpu->kvm);
2572 		}
2573 		r = 0;
2574 		break;
2575 	default:
2576 		r = -EINVAL;
2577 		break;
2578 	}
2579 	return r;
2580 }
2581 
2582 static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
2583 				  struct kvm_s390_mem_op *mop)
2584 {
2585 	void __user *uaddr = (void __user *)mop->buf;
2586 	void *tmpbuf = NULL;
2587 	int r, srcu_idx;
2588 	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
2589 				    | KVM_S390_MEMOP_F_CHECK_ONLY;
2590 
2591 	if (mop->flags & ~supported_flags)
2592 		return -EINVAL;
2593 
2594 	if (mop->size > MEM_OP_MAX_SIZE)
2595 		return -E2BIG;
2596 
2597 	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2598 		tmpbuf = vmalloc(mop->size);
2599 		if (!tmpbuf)
2600 			return -ENOMEM;
2601 	}
2602 
2603 	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2604 
2605 	switch (mop->op) {
2606 	case KVM_S390_MEMOP_LOGICAL_READ:
2607 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2608 			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
2609 			break;
2610 		}
2611 		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2612 		if (r == 0) {
2613 			if (copy_to_user(uaddr, tmpbuf, mop->size))
2614 				r = -EFAULT;
2615 		}
2616 		break;
2617 	case KVM_S390_MEMOP_LOGICAL_WRITE:
2618 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2619 			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
2620 			break;
2621 		}
2622 		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2623 			r = -EFAULT;
2624 			break;
2625 		}
2626 		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2627 		break;
2628 	default:
2629 		r = -EINVAL;
2630 	}
2631 
2632 	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2633 
2634 	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
2635 		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
2636 
2637 	vfree(tmpbuf);
2638 	return r;
2639 }
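
/*
 * Hypothetical userspace sketch (not part of this file): reading guest
 * memory through the ioctl above, with the flag set so that a failed
 * translation is reflected into the guest as a program exception:
 *
 *	char buf[256];
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = guest_addr,
 *		.size  = sizeof(buf),
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)(unsigned long)buf,
 *		.flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &op) < 0)
 *		perror("KVM_S390_MEM_OP");
 *
 * guest_addr and vcpu_fd are placeholders; requests larger than
 * MEM_OP_MAX_SIZE fail with -E2BIG.
 */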
2640 
2641 long kvm_arch_vcpu_ioctl(struct file *filp,
2642 			 unsigned int ioctl, unsigned long arg)
2643 {
2644 	struct kvm_vcpu *vcpu = filp->private_data;
2645 	void __user *argp = (void __user *)arg;
2646 	int idx;
2647 	long r;
2648 
2649 	switch (ioctl) {
2650 	case KVM_S390_IRQ: {
2651 		struct kvm_s390_irq s390irq;
2652 
2653 		r = -EFAULT;
2654 		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
2655 			break;
2656 		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
2657 		break;
2658 	}
2659 	case KVM_S390_INTERRUPT: {
2660 		struct kvm_s390_interrupt s390int;
2661 		struct kvm_s390_irq s390irq;
2662 
2663 		r = -EFAULT;
2664 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
2665 			break;
2666 		if (s390int_to_s390irq(&s390int, &s390irq))
2667 			return -EINVAL;
2668 		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
2669 		break;
2670 	}
2671 	case KVM_S390_STORE_STATUS:
2672 		idx = srcu_read_lock(&vcpu->kvm->srcu);
2673 		r = kvm_s390_vcpu_store_status(vcpu, arg);
2674 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
2675 		break;
2676 	case KVM_S390_SET_INITIAL_PSW: {
2677 		psw_t psw;
2678 
2679 		r = -EFAULT;
2680 		if (copy_from_user(&psw, argp, sizeof(psw)))
2681 			break;
2682 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2683 		break;
2684 	}
2685 	case KVM_S390_INITIAL_RESET:
2686 		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2687 		break;
2688 	case KVM_SET_ONE_REG:
2689 	case KVM_GET_ONE_REG: {
2690 		struct kvm_one_reg reg;
2691 		r = -EFAULT;
2692 		if (copy_from_user(&reg, argp, sizeof(reg)))
2693 			break;
2694 		if (ioctl == KVM_SET_ONE_REG)
2695 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
2696 		else
2697 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
2698 		break;
2699 	}
2700 #ifdef CONFIG_KVM_S390_UCONTROL
2701 	case KVM_S390_UCAS_MAP: {
2702 		struct kvm_s390_ucas_mapping ucasmap;
2703 
2704 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2705 			r = -EFAULT;
2706 			break;
2707 		}
2708 
2709 		if (!kvm_is_ucontrol(vcpu->kvm)) {
2710 			r = -EINVAL;
2711 			break;
2712 		}
2713 
2714 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
2715 				     ucasmap.vcpu_addr, ucasmap.length);
2716 		break;
2717 	}
2718 	case KVM_S390_UCAS_UNMAP: {
2719 		struct kvm_s390_ucas_mapping ucasmap;
2720 
2721 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2722 			r = -EFAULT;
2723 			break;
2724 		}
2725 
2726 		if (!kvm_is_ucontrol(vcpu->kvm)) {
2727 			r = -EINVAL;
2728 			break;
2729 		}
2730 
2731 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
2732 			ucasmap.length);
2733 		break;
2734 	}
2735 #endif
2736 	case KVM_S390_VCPU_FAULT: {
2737 		r = gmap_fault(vcpu->arch.gmap, arg, 0);
2738 		break;
2739 	}
2740 	case KVM_ENABLE_CAP:
2741 	{
2742 		struct kvm_enable_cap cap;
2743 		r = -EFAULT;
2744 		if (copy_from_user(&cap, argp, sizeof(cap)))
2745 			break;
2746 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2747 		break;
2748 	}
2749 	case KVM_S390_MEM_OP: {
2750 		struct kvm_s390_mem_op mem_op;
2751 
2752 		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
2753 			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
2754 		else
2755 			r = -EFAULT;
2756 		break;
2757 	}
2758 	case KVM_S390_SET_IRQ_STATE: {
2759 		struct kvm_s390_irq_state irq_state;
2760 
2761 		r = -EFAULT;
2762 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2763 			break;
2764 		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
2765 		    irq_state.len == 0 ||
2766 		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
2767 			r = -EINVAL;
2768 			break;
2769 		}
2770 		r = kvm_s390_set_irq_state(vcpu,
2771 					   (void __user *) irq_state.buf,
2772 					   irq_state.len);
2773 		break;
2774 	}
2775 	case KVM_S390_GET_IRQ_STATE: {
2776 		struct kvm_s390_irq_state irq_state;
2777 
2778 		r = -EFAULT;
2779 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2780 			break;
2781 		if (irq_state.len == 0) {
2782 			r = -EINVAL;
2783 			break;
2784 		}
2785 		r = kvm_s390_get_irq_state(vcpu,
2786 					   (__u8 __user *)  irq_state.buf,
2787 					   irq_state.len);
2788 		break;
2789 	}
2790 	default:
2791 		r = -ENOTTY;
2792 	}
2793 	return r;
2794 }
2795 
2796 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2797 {
2798 #ifdef CONFIG_KVM_S390_UCONTROL
2799 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
2800 		 && (kvm_is_ucontrol(vcpu->kvm))) {
2801 		vmf->page = virt_to_page(vcpu->arch.sie_block);
2802 		get_page(vmf->page);
2803 		return 0;
2804 	}
2805 #endif
2806 	return VM_FAULT_SIGBUS;
2807 }
2808 
2809 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
2810 			    unsigned long npages)
2811 {
2812 	return 0;
2813 }
2814 
2815 /* Section: memory related */
2816 int kvm_arch_prepare_memory_region(struct kvm *kvm,
2817 				   struct kvm_memory_slot *memslot,
2818 				   const struct kvm_userspace_memory_region *mem,
2819 				   enum kvm_mr_change change)
2820 {
2821 	/* A few sanity checks. Memory slots have to start and end at a
2822 	   segment boundary (1MB). The memory in userland may be fragmented
2823 	   across several vmas, and it is fine to mmap() and munmap() parts
2824 	   of this slot at any time after this call. */
2825 
2826 	if (mem->userspace_addr & 0xffffful)
2827 		return -EINVAL;
2828 
2829 	if (mem->memory_size & 0xffffful)
2830 		return -EINVAL;
2831 
2832 	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
2833 		return -EINVAL;
2834 
2835 	return 0;
2836 }
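
/*
 * Worked example for the checks above: a slot with
 * userspace_addr = 0x3ff00000 and memory_size = 0x10000000 passes the
 * alignment tests (both are multiples of 1MB, so the low 20 bits are
 * zero), whereas memory_size = 0x10080000 would be rejected because
 * 0x10080000 & 0xfffff == 0x80000.
 */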
2837 
2838 void kvm_arch_commit_memory_region(struct kvm *kvm,
2839 				const struct kvm_userspace_memory_region *mem,
2840 				const struct kvm_memory_slot *old,
2841 				const struct kvm_memory_slot *new,
2842 				enum kvm_mr_change change)
2843 {
2844 	int rc;
2845 
2846 	/* If the basics of the memslot do not change, we do not want
2847 	 * to update the gmap. Every update causes several unnecessary
2848 	 * segment translation exceptions. This is usually handled just
2849 	 * fine by the normal fault handler + gmap, but it will also
2850 	 * cause faults on the prefix page of running guest CPUs.
2851 	 */
2852 	if (old->userspace_addr == mem->userspace_addr &&
2853 	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
2854 	    old->npages * PAGE_SIZE == mem->memory_size)
2855 		return;
2856 
2857 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2858 		mem->guest_phys_addr, mem->memory_size);
2859 	if (rc)
2860 		pr_warn("failed to commit memory region\n");
2861 	return;
2862 }
2863 
2864 static int __init kvm_s390_init(void)
2865 {
2866 	if (!sclp.has_sief2) {
2867 		pr_info("SIE not available\n");
2868 		return -ENODEV;
2869 	}
2870 
2871 	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
2872 }
2873 
2874 static void __exit kvm_s390_exit(void)
2875 {
2876 	kvm_exit();
2877 }
2878 
2879 module_init(kvm_s390_init);
2880 module_exit(kvm_s390_exit);
2881 
2882 /*
2883  * Enable autoloading of the kvm module.
2884  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2885  * since x86 takes a different approach.
2886  */
2887 #include <linux/miscdevice.h>
2888 MODULE_ALIAS_MISCDEV(KVM_MINOR);
2889 MODULE_ALIAS("devname:kvm");
2890