xref: /openbmc/linux/arch/s390/kvm/kvm-s390.c (revision f76f6371643b563a7168a6ba5713ce93caa36ecc)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * hosting IBM Z kernel virtual machines (s390x)
4  *
5  * Copyright IBM Corp. 2008, 2018
6  *
7  *    Author(s): Carsten Otte <cotte@de.ibm.com>
8  *               Christian Borntraeger <borntraeger@de.ibm.com>
9  *               Heiko Carstens <heiko.carstens@de.ibm.com>
10  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
11  *               Jason J. Herne <jjherne@us.ibm.com>
12  */
13 
14 #define KMSG_COMPONENT "kvm-s390"
15 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
16 
17 #include <linux/compiler.h>
18 #include <linux/err.h>
19 #include <linux/fs.h>
20 #include <linux/hrtimer.h>
21 #include <linux/init.h>
22 #include <linux/kvm.h>
23 #include <linux/kvm_host.h>
24 #include <linux/mman.h>
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/random.h>
28 #include <linux/slab.h>
29 #include <linux/timer.h>
30 #include <linux/vmalloc.h>
31 #include <linux/bitmap.h>
32 #include <linux/sched/signal.h>
33 #include <linux/string.h>
34 
35 #include <asm/asm-offsets.h>
36 #include <asm/lowcore.h>
37 #include <asm/stp.h>
38 #include <asm/pgtable.h>
39 #include <asm/gmap.h>
40 #include <asm/nmi.h>
41 #include <asm/switch_to.h>
42 #include <asm/isc.h>
43 #include <asm/sclp.h>
44 #include <asm/cpacf.h>
45 #include <asm/timex.h>
46 #include <asm/ap.h>
47 #include "kvm-s390.h"
48 #include "gaccess.h"
49 
50 #define CREATE_TRACE_POINTS
51 #include "trace.h"
52 #include "trace-s390.h"
53 
54 #define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
55 #define LOCAL_IRQS 32
56 #define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
57 			   (KVM_MAX_VCPUS + LOCAL_IRQS))
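/*
 * Worst case for the KVM_S390_{GET,SET}_IRQ_STATE ioctl buffer: a vcpu may
 * have a pending emergency signal from every vcpu plus up to LOCAL_IRQS
 * other local interrupts (descriptive note, not in the original source).
 */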
58 
59 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
60 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
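/*
 * The table below feeds the generic KVM debugfs code: each entry maps a
 * file name to the offset of its counter inside struct kvm_vcpu
 * (VCPU_STAT) or struct kvm (VM_STAT).
 */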
61 
62 struct kvm_stats_debugfs_item debugfs_entries[] = {
63 	{ "userspace_handled", VCPU_STAT(exit_userspace) },
64 	{ "exit_null", VCPU_STAT(exit_null) },
65 	{ "exit_validity", VCPU_STAT(exit_validity) },
66 	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
67 	{ "exit_external_request", VCPU_STAT(exit_external_request) },
68 	{ "exit_io_request", VCPU_STAT(exit_io_request) },
69 	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
70 	{ "exit_instruction", VCPU_STAT(exit_instruction) },
71 	{ "exit_pei", VCPU_STAT(exit_pei) },
72 	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
73 	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
74 	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
75 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
76 	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
77 	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
78 	{ "halt_no_poll_steal", VCPU_STAT(halt_no_poll_steal) },
79 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
80 	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
81 	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
82 	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
83 	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
84 	{ "deliver_ckc", VCPU_STAT(deliver_ckc) },
85 	{ "deliver_cputm", VCPU_STAT(deliver_cputm) },
86 	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
87 	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
88 	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
89 	{ "deliver_virtio", VCPU_STAT(deliver_virtio) },
90 	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
91 	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
92 	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
93 	{ "deliver_program", VCPU_STAT(deliver_program) },
94 	{ "deliver_io", VCPU_STAT(deliver_io) },
95 	{ "deliver_machine_check", VCPU_STAT(deliver_machine_check) },
96 	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
97 	{ "inject_ckc", VCPU_STAT(inject_ckc) },
98 	{ "inject_cputm", VCPU_STAT(inject_cputm) },
99 	{ "inject_external_call", VCPU_STAT(inject_external_call) },
100 	{ "inject_float_mchk", VM_STAT(inject_float_mchk) },
101 	{ "inject_emergency_signal", VCPU_STAT(inject_emergency_signal) },
102 	{ "inject_io", VM_STAT(inject_io) },
103 	{ "inject_mchk", VCPU_STAT(inject_mchk) },
104 	{ "inject_pfault_done", VM_STAT(inject_pfault_done) },
105 	{ "inject_program", VCPU_STAT(inject_program) },
106 	{ "inject_restart", VCPU_STAT(inject_restart) },
107 	{ "inject_service_signal", VM_STAT(inject_service_signal) },
108 	{ "inject_set_prefix", VCPU_STAT(inject_set_prefix) },
109 	{ "inject_stop_signal", VCPU_STAT(inject_stop_signal) },
110 	{ "inject_pfault_init", VCPU_STAT(inject_pfault_init) },
111 	{ "inject_virtio", VM_STAT(inject_virtio) },
112 	{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
113 	{ "instruction_gs", VCPU_STAT(instruction_gs) },
114 	{ "instruction_io_other", VCPU_STAT(instruction_io_other) },
115 	{ "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
116 	{ "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
117 	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
118 	{ "instruction_ptff", VCPU_STAT(instruction_ptff) },
119 	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
120 	{ "instruction_sck", VCPU_STAT(instruction_sck) },
121 	{ "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
122 	{ "instruction_spx", VCPU_STAT(instruction_spx) },
123 	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
124 	{ "instruction_stap", VCPU_STAT(instruction_stap) },
125 	{ "instruction_iske", VCPU_STAT(instruction_iske) },
126 	{ "instruction_ri", VCPU_STAT(instruction_ri) },
127 	{ "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
128 	{ "instruction_sske", VCPU_STAT(instruction_sske) },
129 	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
130 	{ "instruction_essa", VCPU_STAT(instruction_essa) },
131 	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
132 	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
133 	{ "instruction_tb", VCPU_STAT(instruction_tb) },
134 	{ "instruction_tpi", VCPU_STAT(instruction_tpi) },
135 	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
136 	{ "instruction_tsch", VCPU_STAT(instruction_tsch) },
137 	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
138 	{ "instruction_sie", VCPU_STAT(instruction_sie) },
139 	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
140 	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
141 	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
142 	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
143 	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
144 	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
145 	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
146 	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
147 	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
148 	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
149 	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
150 	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
151 	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
152 	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
153 	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
154 	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
155 	{ "instruction_diag_10", VCPU_STAT(diagnose_10) },
156 	{ "instruction_diag_44", VCPU_STAT(diagnose_44) },
157 	{ "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
158 	{ "instruction_diag_258", VCPU_STAT(diagnose_258) },
159 	{ "instruction_diag_308", VCPU_STAT(diagnose_308) },
160 	{ "instruction_diag_500", VCPU_STAT(diagnose_500) },
161 	{ "instruction_diag_other", VCPU_STAT(diagnose_other) },
162 	{ NULL }
163 };
164 
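/*
 * Matches the 16-byte result of get_tod_clock_ext(): the epoch index in the
 * first byte, the 64-bit TOD value, and trailing bytes that are not used
 * here (descriptive note).
 */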
165 struct kvm_s390_tod_clock_ext {
166 	__u8 epoch_idx;
167 	__u64 tod;
168 	__u8 reserved[7];
169 } __packed;
170 
171 /* allow nested virtualization in KVM (if enabled by user space) */
172 static int nested;
173 module_param(nested, int, 0444);
174 MODULE_PARM_DESC(nested, "Nested virtualization support");
175 
176 /* allow 1m huge page guest backing, if !nested */
177 static int hpage;
178 module_param(hpage, int, 0444);
179 MODULE_PARM_DESC(hpage, "1m huge page backing support");
180 
181 /* maximum percentage of steal time for polling. >100 is treated like 100 */
182 static u8 halt_poll_max_steal = 10;
183 module_param(halt_poll_max_steal, byte, 0644);
184 MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");
185 
186 /*
187  * For now we handle at most 16 double words as this is what the s390 base
188  * kernel handles and stores in the prefix page. If we ever need to go beyond
189  * this, code changes will be needed, but the external uapi can stay.
190  */
191 #define SIZE_INTERNAL 16
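/* 16 double words = 16 * 64 = 1024 facility bits */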
192 
193 /*
194  * Base feature mask defining the default facility mask. It consists of the
195  * defines in FACILITIES_KVM plus the non-hypervisor-managed bits.
196  */
197 static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
198 /*
199  * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
200  * and defines the facilities that can be enabled via a cpu model.
201  */
202 static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };
203 
204 static unsigned long kvm_s390_fac_size(void)
205 {
206 	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
207 	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
208 	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
209 		sizeof(S390_lowcore.stfle_fac_list));
210 
211 	return SIZE_INTERNAL;
212 }
213 
214 /* available cpu features supported by kvm */
215 static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
216 /* available subfunctions indicated via query / "test bit" */
217 static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;
218 
219 static struct gmap_notifier gmap_notifier;
220 static struct gmap_notifier vsie_gmap_notifier;
221 debug_info_t *kvm_s390_dbf;
222 
223 /* Section: not file related */
224 int kvm_arch_hardware_enable(void)
225 {
226 	/* every s390 is virtualization enabled ;-) */
227 	return 0;
228 }
229 
230 int kvm_arch_check_processor_compat(void)
231 {
232 	return 0;
233 }
234 
235 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
236 			      unsigned long end);
237 
238 static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
239 {
240 	u8 delta_idx = 0;
241 
242 	/*
243 	 * The TOD jumps by delta; we compensate by adding -delta to
244 	 * the epoch.
245 	 */
246 	delta = -delta;
247 
248 	/* sign-extension - we're adding to signed values below */
249 	if ((s64)delta < 0)
250 		delta_idx = -1;
251 
252 	scb->epoch += delta;
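	/*
	 * With the multiple-epoch facility, epdx:epoch act as one wider
	 * signed value: delta_idx sign-extends the addend into the index,
	 * and the unsigned compare below catches the carry out of the
	 * 64-bit addition (a + b < b iff the add wrapped around).
	 */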
253 	if (scb->ecd & ECD_MEF) {
254 		scb->epdx += delta_idx;
255 		if (scb->epoch < delta)
256 			scb->epdx += 1;
257 	}
258 }
259 
260 /*
261  * This callback is executed during stop_machine(). All CPUs are therefore
262  * temporarily stopped. In order not to change guest behavior, we have to
263  * disable preemption whenever we touch the epoch of kvm and the VCPUs,
264  * so a CPU won't be stopped while calculating with the epoch.
265  */
266 static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
267 			  void *v)
268 {
269 	struct kvm *kvm;
270 	struct kvm_vcpu *vcpu;
271 	int i;
272 	unsigned long long *delta = v;
273 
274 	list_for_each_entry(kvm, &vm_list, vm_list) {
275 		kvm_for_each_vcpu(i, vcpu, kvm) {
276 			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
277 			if (i == 0) {
278 				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
279 				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
280 			}
281 			if (vcpu->arch.cputm_enabled)
282 				vcpu->arch.cputm_start += *delta;
283 			if (vcpu->arch.vsie_block)
284 				kvm_clock_sync_scb(vcpu->arch.vsie_block,
285 						   *delta);
286 		}
287 	}
288 	return NOTIFY_OK;
289 }
290 
291 static struct notifier_block kvm_clock_notifier = {
292 	.notifier_call = kvm_clock_sync,
293 };
294 
295 int kvm_arch_hardware_setup(void)
296 {
297 	gmap_notifier.notifier_call = kvm_gmap_notifier;
298 	gmap_register_pte_notifier(&gmap_notifier);
299 	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
300 	gmap_register_pte_notifier(&vsie_gmap_notifier);
301 	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
302 				       &kvm_clock_notifier);
303 	return 0;
304 }
305 
306 void kvm_arch_hardware_unsetup(void)
307 {
308 	gmap_unregister_pte_notifier(&gmap_notifier);
309 	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
310 	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
311 					 &kvm_clock_notifier);
312 }
313 
314 static void allow_cpu_feat(unsigned long nr)
315 {
316 	set_bit_inv(nr, kvm_s390_available_cpu_feat);
317 }
318 
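/*
 * Probe a PERFORM LOCKED OPERATION function code: bit 0x100 in GR0 turns
 * the operation into a "test bit" query, and cc == 0 reports the function
 * as installed (descriptive note; see the Principles of Operation).
 */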
319 static inline int plo_test_bit(unsigned char nr)
320 {
321 	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
322 	int cc;
323 
324 	asm volatile(
325 		/* Parameter registers are ignored for "test bit" */
326 		"	plo	0,0,0,0(0)\n"
327 		"	ipm	%0\n"
328 		"	srl	%0,28\n"
329 		: "=d" (cc)
330 		: "d" (r0)
331 		: "cc");
332 	return cc == 0;
333 }
334 
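/*
 * Run the query function (GR0 == 0) of an RRF-format instruction and store
 * its 32-byte installed-functions mask at "query" (addressed via GR1).
 */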
335 static inline void __insn32_query(unsigned int opcode, u8 query[32])
336 {
337 	register unsigned long r0 asm("0") = 0;	/* query function */
338 	register unsigned long r1 asm("1") = (unsigned long) query;
339 
340 	asm volatile(
341 		/* Parameter registers are ignored for the query function */
342 		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
343 		: "=m" (*query)
344 		: "d" (r0), "a" (r1), [opc] "i" (opcode)
345 		: "cc");
346 }
347 
348 #define INSN_SORTL 0xb938
349 #define INSN_DFLTCC 0xb939
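/*
 * Opcodes probed through __insn32_query() below: SORT LISTS (facility 150)
 * and DEFLATE CONVERSION CALL (facility 151).
 */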
350 
351 static void kvm_s390_cpu_feat_init(void)
352 {
353 	int i;
354 
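	/* Probe all 256 PLO function codes; store bit i MSB-first, matching the facility-bit convention. */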
355 	for (i = 0; i < 256; ++i) {
356 		if (plo_test_bit(i))
357 			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
358 	}
359 
360 	if (test_facility(28)) /* TOD-clock steering */
361 		ptff(kvm_s390_available_subfunc.ptff,
362 		     sizeof(kvm_s390_available_subfunc.ptff),
363 		     PTFF_QAF);
364 
365 	if (test_facility(17)) { /* MSA */
366 		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
367 			      kvm_s390_available_subfunc.kmac);
368 		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
369 			      kvm_s390_available_subfunc.kmc);
370 		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
371 			      kvm_s390_available_subfunc.km);
372 		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
373 			      kvm_s390_available_subfunc.kimd);
374 		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
375 			      kvm_s390_available_subfunc.klmd);
376 	}
377 	if (test_facility(76)) /* MSA3 */
378 		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
379 			      kvm_s390_available_subfunc.pckmo);
380 	if (test_facility(77)) { /* MSA4 */
381 		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
382 			      kvm_s390_available_subfunc.kmctr);
383 		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
384 			      kvm_s390_available_subfunc.kmf);
385 		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
386 			      kvm_s390_available_subfunc.kmo);
387 		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
388 			      kvm_s390_available_subfunc.pcc);
389 	}
390 	if (test_facility(57)) /* MSA5 */
391 		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
392 			      kvm_s390_available_subfunc.ppno);
393 
394 	if (test_facility(146)) /* MSA8 */
395 		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
396 			      kvm_s390_available_subfunc.kma);
397 
398 	if (test_facility(155)) /* MSA9 */
399 		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
400 			      kvm_s390_available_subfunc.kdsa);
401 
402 	if (test_facility(150)) /* SORTL */
403 		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);
404 
405 	if (test_facility(151)) /* DFLTCC */
406 		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);
407 
408 	if (MACHINE_HAS_ESOP)
409 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
410 	/*
411 	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
412 	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
413 	 */
414 	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
415 	    !test_facility(3) || !nested)
416 		return;
417 	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
418 	if (sclp.has_64bscao)
419 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
420 	if (sclp.has_siif)
421 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
422 	if (sclp.has_gpere)
423 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
424 	if (sclp.has_gsls)
425 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
426 	if (sclp.has_ib)
427 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
428 	if (sclp.has_cei)
429 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
430 	if (sclp.has_ibs)
431 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
432 	if (sclp.has_kss)
433 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
434 	/*
435 	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
436 	 * all skey handling functions read/set the skey from the PGSTE
437 	 * instead of the real storage key.
438 	 *
439 	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
440 	 * pages being detected as preserved although they are resident.
441 	 *
442 	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
443 	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
444 	 *
445 	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
446 	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
447 	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
448 	 *
449 	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
450 	 * cannot easily shadow the SCA because of the ipte lock.
451 	 */
452 }
453 
454 int kvm_arch_init(void *opaque)
455 {
456 	int rc = -ENOMEM;
457 
458 	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
459 	if (!kvm_s390_dbf)
460 		return -ENOMEM;
461 
462 	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view))
463 		goto out;
464 
465 	kvm_s390_cpu_feat_init();
466 
467 	/* Register floating interrupt controller interface. */
468 	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
469 	if (rc) {
470 		pr_err("A FLIC registration call failed with rc=%d\n", rc);
471 		goto out;
472 	}
473 
474 	rc = kvm_s390_gib_init(GAL_ISC);
475 	if (rc)
476 		goto out;
477 
478 	return 0;
479 
480 out:
481 	kvm_arch_exit();
482 	return rc;
483 }
484 
485 void kvm_arch_exit(void)
486 {
487 	kvm_s390_gib_destroy();
488 	debug_unregister(kvm_s390_dbf);
489 }
490 
491 /* Section: device related */
492 long kvm_arch_dev_ioctl(struct file *filp,
493 			unsigned int ioctl, unsigned long arg)
494 {
495 	if (ioctl == KVM_S390_ENABLE_SIE)
496 		return s390_enable_sie();
497 	return -EINVAL;
498 }
499 
500 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
501 {
502 	int r;
503 
504 	switch (ext) {
505 	case KVM_CAP_S390_PSW:
506 	case KVM_CAP_S390_GMAP:
507 	case KVM_CAP_SYNC_MMU:
508 #ifdef CONFIG_KVM_S390_UCONTROL
509 	case KVM_CAP_S390_UCONTROL:
510 #endif
511 	case KVM_CAP_ASYNC_PF:
512 	case KVM_CAP_SYNC_REGS:
513 	case KVM_CAP_ONE_REG:
514 	case KVM_CAP_ENABLE_CAP:
515 	case KVM_CAP_S390_CSS_SUPPORT:
516 	case KVM_CAP_IOEVENTFD:
517 	case KVM_CAP_DEVICE_CTRL:
518 	case KVM_CAP_S390_IRQCHIP:
519 	case KVM_CAP_VM_ATTRIBUTES:
520 	case KVM_CAP_MP_STATE:
521 	case KVM_CAP_IMMEDIATE_EXIT:
522 	case KVM_CAP_S390_INJECT_IRQ:
523 	case KVM_CAP_S390_USER_SIGP:
524 	case KVM_CAP_S390_USER_STSI:
525 	case KVM_CAP_S390_SKEYS:
526 	case KVM_CAP_S390_IRQ_STATE:
527 	case KVM_CAP_S390_USER_INSTR0:
528 	case KVM_CAP_S390_CMMA_MIGRATION:
529 	case KVM_CAP_S390_AIS:
530 	case KVM_CAP_S390_AIS_MIGRATION:
531 		r = 1;
532 		break;
533 	case KVM_CAP_S390_HPAGE_1M:
534 		r = 0;
535 		if (hpage && !kvm_is_ucontrol(kvm))
536 			r = 1;
537 		break;
538 	case KVM_CAP_S390_MEM_OP:
539 		r = MEM_OP_MAX_SIZE;
540 		break;
541 	case KVM_CAP_NR_VCPUS:
542 	case KVM_CAP_MAX_VCPUS:
543 	case KVM_CAP_MAX_VCPU_ID:
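		/*
		 * The basic SCA limits a VM to KVM_S390_BSCA_CPU_SLOTS
		 * vcpus; with ESCA plus 64-bit SCAO more slots become
		 * usable, and when no SCA entries are needed the generic
		 * KVM_MAX_VCPUS limit applies (descriptive note).
		 */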
544 		r = KVM_S390_BSCA_CPU_SLOTS;
545 		if (!kvm_s390_use_sca_entries())
546 			r = KVM_MAX_VCPUS;
547 		else if (sclp.has_esca && sclp.has_64bscao)
548 			r = KVM_S390_ESCA_CPU_SLOTS;
549 		break;
550 	case KVM_CAP_S390_COW:
551 		r = MACHINE_HAS_ESOP;
552 		break;
553 	case KVM_CAP_S390_VECTOR_REGISTERS:
554 		r = MACHINE_HAS_VX;
555 		break;
556 	case KVM_CAP_S390_RI:
557 		r = test_facility(64);
558 		break;
559 	case KVM_CAP_S390_GS:
560 		r = test_facility(133);
561 		break;
562 	case KVM_CAP_S390_BPB:
563 		r = test_facility(82);
564 		break;
565 	default:
566 		r = 0;
567 	}
568 	return r;
569 }
570 
571 static void kvm_s390_sync_dirty_log(struct kvm *kvm,
572 				    struct kvm_memory_slot *memslot)
573 {
574 	int i;
575 	gfn_t cur_gfn, last_gfn;
576 	unsigned long gaddr, vmaddr;
577 	struct gmap *gmap = kvm->arch.gmap;
578 	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);
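	/* The bitmap covers one segment: _PAGE_ENTRIES 4K pages, i.e. 1 MB of guest memory (descriptive note). */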
579 
580 	/* Loop over all guest segments */
581 	cur_gfn = memslot->base_gfn;
582 	last_gfn = memslot->base_gfn + memslot->npages;
583 	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
584 		gaddr = gfn_to_gpa(cur_gfn);
585 		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
586 		if (kvm_is_error_hva(vmaddr))
587 			continue;
588 
589 		bitmap_zero(bitmap, _PAGE_ENTRIES);
590 		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
591 		for (i = 0; i < _PAGE_ENTRIES; i++) {
592 			if (test_bit(i, bitmap))
593 				mark_page_dirty(kvm, cur_gfn + i);
594 		}
595 
596 		if (fatal_signal_pending(current))
597 			return;
598 		cond_resched();
599 	}
600 }
601 
602 /* Section: vm related */
603 static void sca_del_vcpu(struct kvm_vcpu *vcpu);
604 
605 /*
606  * Get (and clear) the dirty memory log for a memory slot.
607  */
608 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
609 			       struct kvm_dirty_log *log)
610 {
611 	int r;
612 	unsigned long n;
613 	struct kvm_memslots *slots;
614 	struct kvm_memory_slot *memslot;
615 	int is_dirty = 0;
616 
617 	if (kvm_is_ucontrol(kvm))
618 		return -EINVAL;
619 
620 	mutex_lock(&kvm->slots_lock);
621 
622 	r = -EINVAL;
623 	if (log->slot >= KVM_USER_MEM_SLOTS)
624 		goto out;
625 
626 	slots = kvm_memslots(kvm);
627 	memslot = id_to_memslot(slots, log->slot);
628 	r = -ENOENT;
629 	if (!memslot->dirty_bitmap)
630 		goto out;
631 
632 	kvm_s390_sync_dirty_log(kvm, memslot);
633 	r = kvm_get_dirty_log(kvm, log, &is_dirty);
634 	if (r)
635 		goto out;
636 
637 	/* Clear the dirty log */
638 	if (is_dirty) {
639 		n = kvm_dirty_bitmap_bytes(memslot);
640 		memset(memslot->dirty_bitmap, 0, n);
641 	}
642 	r = 0;
643 out:
644 	mutex_unlock(&kvm->slots_lock);
645 	return r;
646 }
647 
648 static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
649 {
650 	unsigned int i;
651 	struct kvm_vcpu *vcpu;
652 
653 	kvm_for_each_vcpu(i, vcpu, kvm) {
654 		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
655 	}
656 }
657 
658 int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
659 {
660 	int r;
661 
662 	if (cap->flags)
663 		return -EINVAL;
664 
665 	switch (cap->cap) {
666 	case KVM_CAP_S390_IRQCHIP:
667 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
668 		kvm->arch.use_irqchip = 1;
669 		r = 0;
670 		break;
671 	case KVM_CAP_S390_USER_SIGP:
672 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
673 		kvm->arch.user_sigp = 1;
674 		r = 0;
675 		break;
676 	case KVM_CAP_S390_VECTOR_REGISTERS:
677 		mutex_lock(&kvm->lock);
678 		if (kvm->created_vcpus) {
679 			r = -EBUSY;
680 		} else if (MACHINE_HAS_VX) {
681 			set_kvm_facility(kvm->arch.model.fac_mask, 129);
682 			set_kvm_facility(kvm->arch.model.fac_list, 129);
683 			if (test_facility(134)) {
684 				set_kvm_facility(kvm->arch.model.fac_mask, 134);
685 				set_kvm_facility(kvm->arch.model.fac_list, 134);
686 			}
687 			if (test_facility(135)) {
688 				set_kvm_facility(kvm->arch.model.fac_mask, 135);
689 				set_kvm_facility(kvm->arch.model.fac_list, 135);
690 			}
691 			if (test_facility(148)) {
692 				set_kvm_facility(kvm->arch.model.fac_mask, 148);
693 				set_kvm_facility(kvm->arch.model.fac_list, 148);
694 			}
695 			if (test_facility(152)) {
696 				set_kvm_facility(kvm->arch.model.fac_mask, 152);
697 				set_kvm_facility(kvm->arch.model.fac_list, 152);
698 			}
699 			r = 0;
700 		} else
701 			r = -EINVAL;
702 		mutex_unlock(&kvm->lock);
703 		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
704 			 r ? "(not available)" : "(success)");
705 		break;
706 	case KVM_CAP_S390_RI:
707 		r = -EINVAL;
708 		mutex_lock(&kvm->lock);
709 		if (kvm->created_vcpus) {
710 			r = -EBUSY;
711 		} else if (test_facility(64)) {
712 			set_kvm_facility(kvm->arch.model.fac_mask, 64);
713 			set_kvm_facility(kvm->arch.model.fac_list, 64);
714 			r = 0;
715 		}
716 		mutex_unlock(&kvm->lock);
717 		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
718 			 r ? "(not available)" : "(success)");
719 		break;
720 	case KVM_CAP_S390_AIS:
721 		mutex_lock(&kvm->lock);
722 		if (kvm->created_vcpus) {
723 			r = -EBUSY;
724 		} else {
725 			set_kvm_facility(kvm->arch.model.fac_mask, 72);
726 			set_kvm_facility(kvm->arch.model.fac_list, 72);
727 			r = 0;
728 		}
729 		mutex_unlock(&kvm->lock);
730 		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
731 			 r ? "(not available)" : "(success)");
732 		break;
733 	case KVM_CAP_S390_GS:
734 		r = -EINVAL;
735 		mutex_lock(&kvm->lock);
736 		if (kvm->created_vcpus) {
737 			r = -EBUSY;
738 		} else if (test_facility(133)) {
739 			set_kvm_facility(kvm->arch.model.fac_mask, 133);
740 			set_kvm_facility(kvm->arch.model.fac_list, 133);
741 			r = 0;
742 		}
743 		mutex_unlock(&kvm->lock);
744 		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
745 			 r ? "(not available)" : "(success)");
746 		break;
747 	case KVM_CAP_S390_HPAGE_1M:
748 		mutex_lock(&kvm->lock);
749 		if (kvm->created_vcpus)
750 			r = -EBUSY;
751 		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
752 			r = -EINVAL;
753 		else {
754 			r = 0;
755 			down_write(&kvm->mm->mmap_sem);
756 			kvm->mm->context.allow_gmap_hpage_1m = 1;
757 			up_write(&kvm->mm->mmap_sem);
758 			/*
759 			 * We might have to create fake 4k page
760 			 * tables. To prevent the hardware from working
761 			 * on stale PGSTEs, we emulate these instructions.
762 			 */
763 			kvm->arch.use_skf = 0;
764 			kvm->arch.use_pfmfi = 0;
765 		}
766 		mutex_unlock(&kvm->lock);
767 		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
768 			 r ? "(not available)" : "(success)");
769 		break;
770 	case KVM_CAP_S390_USER_STSI:
771 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
772 		kvm->arch.user_stsi = 1;
773 		r = 0;
774 		break;
775 	case KVM_CAP_S390_USER_INSTR0:
776 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
777 		kvm->arch.user_instr0 = 1;
778 		icpt_operexc_on_all_vcpus(kvm);
779 		r = 0;
780 		break;
781 	default:
782 		r = -EINVAL;
783 		break;
784 	}
785 	return r;
786 }
787 
788 static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
789 {
790 	int ret;
791 
792 	switch (attr->attr) {
793 	case KVM_S390_VM_MEM_LIMIT_SIZE:
794 		ret = 0;
795 		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
796 			 kvm->arch.mem_limit);
797 		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
798 			ret = -EFAULT;
799 		break;
800 	default:
801 		ret = -ENXIO;
802 		break;
803 	}
804 	return ret;
805 }
806 
807 static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
808 {
809 	int ret;
810 	unsigned int idx;
811 	switch (attr->attr) {
812 	case KVM_S390_VM_MEM_ENABLE_CMMA:
813 		ret = -ENXIO;
814 		if (!sclp.has_cmma)
815 			break;
816 
817 		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
818 		mutex_lock(&kvm->lock);
819 		if (kvm->created_vcpus)
820 			ret = -EBUSY;
821 		else if (kvm->mm->context.allow_gmap_hpage_1m)
822 			ret = -EINVAL;
823 		else {
824 			kvm->arch.use_cmma = 1;
825 			/* Not compatible with cmma. */
826 			kvm->arch.use_pfmfi = 0;
827 			ret = 0;
828 		}
829 		mutex_unlock(&kvm->lock);
830 		break;
831 	case KVM_S390_VM_MEM_CLR_CMMA:
832 		ret = -ENXIO;
833 		if (!sclp.has_cmma)
834 			break;
835 		ret = -EINVAL;
836 		if (!kvm->arch.use_cmma)
837 			break;
838 
839 		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
840 		mutex_lock(&kvm->lock);
841 		idx = srcu_read_lock(&kvm->srcu);
842 		s390_reset_cmma(kvm->arch.gmap->mm);
843 		srcu_read_unlock(&kvm->srcu, idx);
844 		mutex_unlock(&kvm->lock);
845 		ret = 0;
846 		break;
847 	case KVM_S390_VM_MEM_LIMIT_SIZE: {
848 		unsigned long new_limit;
849 
850 		if (kvm_is_ucontrol(kvm))
851 			return -EINVAL;
852 
853 		if (get_user(new_limit, (u64 __user *)attr->addr))
854 			return -EFAULT;
855 
856 		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
857 		    new_limit > kvm->arch.mem_limit)
858 			return -E2BIG;
859 
860 		if (!new_limit)
861 			return -EINVAL;
862 
863 		/* gmap_create takes last usable address */
864 		if (new_limit != KVM_S390_NO_MEM_LIMIT)
865 			new_limit -= 1;
866 
867 		ret = -EBUSY;
868 		mutex_lock(&kvm->lock);
869 		if (!kvm->created_vcpus) {
870 			/* gmap_create will round the limit up */
871 			struct gmap *new = gmap_create(current->mm, new_limit);
872 
873 			if (!new) {
874 				ret = -ENOMEM;
875 			} else {
876 				gmap_remove(kvm->arch.gmap);
877 				new->private = kvm;
878 				kvm->arch.gmap = new;
879 				ret = 0;
880 			}
881 		}
882 		mutex_unlock(&kvm->lock);
883 		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
884 		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
885 			 (void *) kvm->arch.gmap->asce);
886 		break;
887 	}
888 	default:
889 		ret = -ENXIO;
890 		break;
891 	}
892 	return ret;
893 }
894 
895 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
896 
897 void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
898 {
899 	struct kvm_vcpu *vcpu;
900 	int i;
901 
902 	kvm_s390_vcpu_block_all(kvm);
903 
904 	kvm_for_each_vcpu(i, vcpu, kvm) {
905 		kvm_s390_vcpu_crypto_setup(vcpu);
906 		/* recreate the shadow crycb by leaving the VSIE handler */
907 		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
908 	}
909 
910 	kvm_s390_vcpu_unblock_all(kvm);
911 }
912 
913 static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
914 {
915 	mutex_lock(&kvm->lock);
916 	switch (attr->attr) {
917 	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
918 		if (!test_kvm_facility(kvm, 76)) {
919 			mutex_unlock(&kvm->lock);
920 			return -EINVAL;
921 		}
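		/*
		 * Generating a fresh random wrapping-key mask effectively
		 * invalidates any AES keys the guest wrapped earlier
		 * (descriptive note).
		 */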
922 		get_random_bytes(
923 			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
924 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
925 		kvm->arch.crypto.aes_kw = 1;
926 		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
927 		break;
928 	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
929 		if (!test_kvm_facility(kvm, 76)) {
930 			mutex_unlock(&kvm->lock);
931 			return -EINVAL;
932 		}
933 		get_random_bytes(
934 			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
935 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
936 		kvm->arch.crypto.dea_kw = 1;
937 		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
938 		break;
939 	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
940 		if (!test_kvm_facility(kvm, 76)) {
941 			mutex_unlock(&kvm->lock);
942 			return -EINVAL;
943 		}
944 		kvm->arch.crypto.aes_kw = 0;
945 		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
946 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
947 		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
948 		break;
949 	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
950 		if (!test_kvm_facility(kvm, 76)) {
951 			mutex_unlock(&kvm->lock);
952 			return -EINVAL;
953 		}
954 		kvm->arch.crypto.dea_kw = 0;
955 		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
956 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
957 		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
958 		break;
959 	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
960 		if (!ap_instructions_available()) {
961 			mutex_unlock(&kvm->lock);
962 			return -EOPNOTSUPP;
963 		}
964 		kvm->arch.crypto.apie = 1;
965 		break;
966 	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
967 		if (!ap_instructions_available()) {
968 			mutex_unlock(&kvm->lock);
969 			return -EOPNOTSUPP;
970 		}
971 		kvm->arch.crypto.apie = 0;
972 		break;
973 	default:
974 		mutex_unlock(&kvm->lock);
975 		return -ENXIO;
976 	}
977 
978 	kvm_s390_vcpu_crypto_reset_all(kvm);
979 	mutex_unlock(&kvm->lock);
980 	return 0;
981 }
982 
983 static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
984 {
985 	int cx;
986 	struct kvm_vcpu *vcpu;
987 
988 	kvm_for_each_vcpu(cx, vcpu, kvm)
989 		kvm_s390_sync_request(req, vcpu);
990 }
991 
992 /*
993  * Must be called with kvm->srcu held to avoid races on memslots, and with
994  * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
995  */
996 static int kvm_s390_vm_start_migration(struct kvm *kvm)
997 {
998 	struct kvm_memory_slot *ms;
999 	struct kvm_memslots *slots;
1000 	unsigned long ram_pages = 0;
1001 	int slotnr;
1002 
1003 	/* migration mode already enabled */
1004 	if (kvm->arch.migration_mode)
1005 		return 0;
1006 	slots = kvm_memslots(kvm);
1007 	if (!slots || !slots->used_slots)
1008 		return -EINVAL;
1009 
1010 	if (!kvm->arch.use_cmma) {
1011 		kvm->arch.migration_mode = 1;
1012 		return 0;
1013 	}
1014 	/* mark all the pages in active slots as dirty */
1015 	for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
1016 		ms = slots->memslots + slotnr;
1017 		if (!ms->dirty_bitmap)
1018 			return -EINVAL;
1019 		/*
1020 		 * The second half of the bitmap is only used on x86,
1021 		 * and would be wasted otherwise, so we put it to good
1022 		 * use here to keep track of the state of the storage
1023 		 * attributes.
1024 		 */
1025 		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
1026 		ram_pages += ms->npages;
1027 	}
1028 	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
1029 	kvm->arch.migration_mode = 1;
1030 	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
1031 	return 0;
1032 }
1033 
1034 /*
1035  * Must be called with kvm->slots_lock to avoid races with ourselves and
1036  * kvm_s390_vm_start_migration.
1037  */
1038 static int kvm_s390_vm_stop_migration(struct kvm *kvm)
1039 {
1040 	/* migration mode already disabled */
1041 	if (!kvm->arch.migration_mode)
1042 		return 0;
1043 	kvm->arch.migration_mode = 0;
1044 	if (kvm->arch.use_cmma)
1045 		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
1046 	return 0;
1047 }
1048 
1049 static int kvm_s390_vm_set_migration(struct kvm *kvm,
1050 				     struct kvm_device_attr *attr)
1051 {
1052 	int res = -ENXIO;
1053 
1054 	mutex_lock(&kvm->slots_lock);
1055 	switch (attr->attr) {
1056 	case KVM_S390_VM_MIGRATION_START:
1057 		res = kvm_s390_vm_start_migration(kvm);
1058 		break;
1059 	case KVM_S390_VM_MIGRATION_STOP:
1060 		res = kvm_s390_vm_stop_migration(kvm);
1061 		break;
1062 	default:
1063 		break;
1064 	}
1065 	mutex_unlock(&kvm->slots_lock);
1066 
1067 	return res;
1068 }
1069 
1070 static int kvm_s390_vm_get_migration(struct kvm *kvm,
1071 				     struct kvm_device_attr *attr)
1072 {
1073 	u64 mig = kvm->arch.migration_mode;
1074 
1075 	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
1076 		return -ENXIO;
1077 
1078 	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
1079 		return -EFAULT;
1080 	return 0;
1081 }
1082 
1083 static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1084 {
1085 	struct kvm_s390_vm_tod_clock gtod;
1086 
1087 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
1088 		return -EFAULT;
1089 
1090 	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
1091 		return -EINVAL;
1092 	kvm_s390_set_tod_clock(kvm, &gtod);
1093 
1094 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
1095 		gtod.epoch_idx, gtod.tod);
1096 
1097 	return 0;
1098 }
1099 
1100 static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
1101 {
1102 	u8 gtod_high;
1103 
1104 	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
1105 					   sizeof(gtod_high)))
1106 		return -EFAULT;
1107 
1108 	if (gtod_high != 0)
1109 		return -EINVAL;
1110 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
1111 
1112 	return 0;
1113 }
1114 
1115 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1116 {
1117 	struct kvm_s390_vm_tod_clock gtod = { 0 };
1118 
1119 	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
1120 			   sizeof(gtod.tod)))
1121 		return -EFAULT;
1122 
1123 	kvm_s390_set_tod_clock(kvm, &gtod);
1124 	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
1125 	return 0;
1126 }
1127 
1128 static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1129 {
1130 	int ret;
1131 
1132 	if (attr->flags)
1133 		return -EINVAL;
1134 
1135 	switch (attr->attr) {
1136 	case KVM_S390_VM_TOD_EXT:
1137 		ret = kvm_s390_set_tod_ext(kvm, attr);
1138 		break;
1139 	case KVM_S390_VM_TOD_HIGH:
1140 		ret = kvm_s390_set_tod_high(kvm, attr);
1141 		break;
1142 	case KVM_S390_VM_TOD_LOW:
1143 		ret = kvm_s390_set_tod_low(kvm, attr);
1144 		break;
1145 	default:
1146 		ret = -ENXIO;
1147 		break;
1148 	}
1149 	return ret;
1150 }
1151 
1152 static void kvm_s390_get_tod_clock(struct kvm *kvm,
1153 				   struct kvm_s390_vm_tod_clock *gtod)
1154 {
1155 	struct kvm_s390_tod_clock_ext htod;
1156 
1157 	preempt_disable();
1158 
1159 	get_tod_clock_ext((char *)&htod);
1160 
1161 	gtod->tod = htod.tod + kvm->arch.epoch;
1162 	gtod->epoch_idx = 0;
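	/*
	 * With the multiple-epoch facility the guest TOD spans epoch_idx:tod;
	 * the unsigned compare below folds the carry of the 64-bit addition
	 * above into the epoch index (descriptive note).
	 */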
1163 	if (test_kvm_facility(kvm, 139)) {
1164 		gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
1165 		if (gtod->tod < htod.tod)
1166 			gtod->epoch_idx += 1;
1167 	}
1168 
1169 	preempt_enable();
1170 }
1171 
1172 static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1173 {
1174 	struct kvm_s390_vm_tod_clock gtod;
1175 
1176 	memset(&gtod, 0, sizeof(gtod));
1177 	kvm_s390_get_tod_clock(kvm, &gtod);
1178 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1179 		return -EFAULT;
1180 
1181 	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
1182 		gtod.epoch_idx, gtod.tod);
1183 	return 0;
1184 }
1185 
1186 static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
1187 {
1188 	u8 gtod_high = 0;
1189 
1190 	if (copy_to_user((void __user *)attr->addr, &gtod_high,
1191 					 sizeof(gtod_high)))
1192 		return -EFAULT;
1193 	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
1194 
1195 	return 0;
1196 }
1197 
1198 static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1199 {
1200 	u64 gtod;
1201 
1202 	gtod = kvm_s390_get_tod_clock_fast(kvm);
1203 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1204 		return -EFAULT;
1205 	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
1206 
1207 	return 0;
1208 }
1209 
1210 static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1211 {
1212 	int ret;
1213 
1214 	if (attr->flags)
1215 		return -EINVAL;
1216 
1217 	switch (attr->attr) {
1218 	case KVM_S390_VM_TOD_EXT:
1219 		ret = kvm_s390_get_tod_ext(kvm, attr);
1220 		break;
1221 	case KVM_S390_VM_TOD_HIGH:
1222 		ret = kvm_s390_get_tod_high(kvm, attr);
1223 		break;
1224 	case KVM_S390_VM_TOD_LOW:
1225 		ret = kvm_s390_get_tod_low(kvm, attr);
1226 		break;
1227 	default:
1228 		ret = -ENXIO;
1229 		break;
1230 	}
1231 	return ret;
1232 }
1233 
1234 static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1235 {
1236 	struct kvm_s390_vm_cpu_processor *proc;
1237 	u16 lowest_ibc, unblocked_ibc;
1238 	int ret = 0;
1239 
1240 	mutex_lock(&kvm->lock);
1241 	if (kvm->created_vcpus) {
1242 		ret = -EBUSY;
1243 		goto out;
1244 	}
1245 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
1246 	if (!proc) {
1247 		ret = -ENOMEM;
1248 		goto out;
1249 	}
1250 	if (!copy_from_user(proc, (void __user *)attr->addr,
1251 			    sizeof(*proc))) {
1252 		kvm->arch.model.cpuid = proc->cpuid;
1253 		lowest_ibc = sclp.ibc >> 16 & 0xfff;
1254 		unblocked_ibc = sclp.ibc & 0xfff;
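		/*
		 * Clamp the requested IBC level into the machine's
		 * supported range as reported by the SCLP: the upper
		 * halfword of sclp.ibc carries the lowest supported level,
		 * the lower halfword the highest unblocked one
		 * (descriptive note).
		 */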
1255 		if (lowest_ibc && proc->ibc) {
1256 			if (proc->ibc > unblocked_ibc)
1257 				kvm->arch.model.ibc = unblocked_ibc;
1258 			else if (proc->ibc < lowest_ibc)
1259 				kvm->arch.model.ibc = lowest_ibc;
1260 			else
1261 				kvm->arch.model.ibc = proc->ibc;
1262 		}
1263 		memcpy(kvm->arch.model.fac_list, proc->fac_list,
1264 		       S390_ARCH_FAC_LIST_SIZE_BYTE);
1265 		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1266 			 kvm->arch.model.ibc,
1267 			 kvm->arch.model.cpuid);
1268 		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1269 			 kvm->arch.model.fac_list[0],
1270 			 kvm->arch.model.fac_list[1],
1271 			 kvm->arch.model.fac_list[2]);
1272 	} else
1273 		ret = -EFAULT;
1274 	kfree(proc);
1275 out:
1276 	mutex_unlock(&kvm->lock);
1277 	return ret;
1278 }
1279 
1280 static int kvm_s390_set_processor_feat(struct kvm *kvm,
1281 				       struct kvm_device_attr *attr)
1282 {
1283 	struct kvm_s390_vm_cpu_feat data;
1284 
1285 	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
1286 		return -EFAULT;
1287 	if (!bitmap_subset((unsigned long *) data.feat,
1288 			   kvm_s390_available_cpu_feat,
1289 			   KVM_S390_VM_CPU_FEAT_NR_BITS))
1290 		return -EINVAL;
1291 
1292 	mutex_lock(&kvm->lock);
1293 	if (kvm->created_vcpus) {
1294 		mutex_unlock(&kvm->lock);
1295 		return -EBUSY;
1296 	}
1297 	bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
1298 		    KVM_S390_VM_CPU_FEAT_NR_BITS);
1299 	mutex_unlock(&kvm->lock);
1300 	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1301 			 data.feat[0],
1302 			 data.feat[1],
1303 			 data.feat[2]);
1304 	return 0;
1305 }
1306 
1307 static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
1308 					  struct kvm_device_attr *attr)
1309 {
1310 	mutex_lock(&kvm->lock);
1311 	if (kvm->created_vcpus) {
1312 		mutex_unlock(&kvm->lock);
1313 		return -EBUSY;
1314 	}
1315 
1316 	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
1317 			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
1318 		mutex_unlock(&kvm->lock);
1319 		return -EFAULT;
1320 	}
1321 	mutex_unlock(&kvm->lock);
1322 
1323 	VM_EVENT(kvm, 3, "SET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1324 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1325 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1326 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1327 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1328 	VM_EVENT(kvm, 3, "SET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
1329 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1330 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1331 	VM_EVENT(kvm, 3, "SET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
1332 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1333 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1334 	VM_EVENT(kvm, 3, "SET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
1335 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1336 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1337 	VM_EVENT(kvm, 3, "SET: guest KM     subfunc 0x%16.16lx.%16.16lx",
1338 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1339 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1340 	VM_EVENT(kvm, 3, "SET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
1341 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1342 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1343 	VM_EVENT(kvm, 3, "SET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
1344 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1345 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1346 	VM_EVENT(kvm, 3, "SET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
1347 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1348 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1349 	VM_EVENT(kvm, 3, "SET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
1350 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1351 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1352 	VM_EVENT(kvm, 3, "SET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
1353 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1354 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1355 	VM_EVENT(kvm, 3, "SET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
1356 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1357 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1358 	VM_EVENT(kvm, 3, "SET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
1359 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1360 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1361 	VM_EVENT(kvm, 3, "SET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
1362 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1363 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1364 	VM_EVENT(kvm, 3, "SET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
1365 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1366 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
1367 	VM_EVENT(kvm, 3, "SET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
1368 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1369 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1370 	VM_EVENT(kvm, 3, "SET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1371 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1372 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1373 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1374 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
1375 	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1376 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1377 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1378 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1379 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
1380 
1381 	return 0;
1382 }
1383 
1384 static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1385 {
1386 	int ret = -ENXIO;
1387 
1388 	switch (attr->attr) {
1389 	case KVM_S390_VM_CPU_PROCESSOR:
1390 		ret = kvm_s390_set_processor(kvm, attr);
1391 		break;
1392 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1393 		ret = kvm_s390_set_processor_feat(kvm, attr);
1394 		break;
1395 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1396 		ret = kvm_s390_set_processor_subfunc(kvm, attr);
1397 		break;
1398 	}
1399 	return ret;
1400 }
1401 
1402 static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1403 {
1404 	struct kvm_s390_vm_cpu_processor *proc;
1405 	int ret = 0;
1406 
1407 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
1408 	if (!proc) {
1409 		ret = -ENOMEM;
1410 		goto out;
1411 	}
1412 	proc->cpuid = kvm->arch.model.cpuid;
1413 	proc->ibc = kvm->arch.model.ibc;
1414 	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1415 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
1416 	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1417 		 kvm->arch.model.ibc,
1418 		 kvm->arch.model.cpuid);
1419 	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1420 		 kvm->arch.model.fac_list[0],
1421 		 kvm->arch.model.fac_list[1],
1422 		 kvm->arch.model.fac_list[2]);
1423 	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1424 		ret = -EFAULT;
1425 	kfree(proc);
1426 out:
1427 	return ret;
1428 }
1429 
1430 static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1431 {
1432 	struct kvm_s390_vm_cpu_machine *mach;
1433 	int ret = 0;
1434 
1435 	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
1436 	if (!mach) {
1437 		ret = -ENOMEM;
1438 		goto out;
1439 	}
1440 	get_cpu_id((struct cpuid *) &mach->cpuid);
1441 	mach->ibc = sclp.ibc;
1442 	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
1443 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
1444 	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
1445 	       sizeof(S390_lowcore.stfle_fac_list));
1446 	VM_EVENT(kvm, 3, "GET: host ibc:  0x%4.4x, host cpuid:  0x%16.16llx",
1447 		 kvm->arch.model.ibc,
1448 		 kvm->arch.model.cpuid);
1449 	VM_EVENT(kvm, 3, "GET: host facmask:  0x%16.16llx.%16.16llx.%16.16llx",
1450 		 mach->fac_mask[0],
1451 		 mach->fac_mask[1],
1452 		 mach->fac_mask[2]);
1453 	VM_EVENT(kvm, 3, "GET: host faclist:  0x%16.16llx.%16.16llx.%16.16llx",
1454 		 mach->fac_list[0],
1455 		 mach->fac_list[1],
1456 		 mach->fac_list[2]);
1457 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1458 		ret = -EFAULT;
1459 	kfree(mach);
1460 out:
1461 	return ret;
1462 }
1463 
1464 static int kvm_s390_get_processor_feat(struct kvm *kvm,
1465 				       struct kvm_device_attr *attr)
1466 {
1467 	struct kvm_s390_vm_cpu_feat data;
1468 
1469 	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
1470 		    KVM_S390_VM_CPU_FEAT_NR_BITS);
1471 	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1472 		return -EFAULT;
1473 	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1474 			 data.feat[0],
1475 			 data.feat[1],
1476 			 data.feat[2]);
1477 	return 0;
1478 }
1479 
1480 static int kvm_s390_get_machine_feat(struct kvm *kvm,
1481 				     struct kvm_device_attr *attr)
1482 {
1483 	struct kvm_s390_vm_cpu_feat data;
1484 
1485 	bitmap_copy((unsigned long *) data.feat,
1486 		    kvm_s390_available_cpu_feat,
1487 		    KVM_S390_VM_CPU_FEAT_NR_BITS);
1488 	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1489 		return -EFAULT;
1490 	VM_EVENT(kvm, 3, "GET: host feat:  0x%16.16llx.0x%16.16llx.0x%16.16llx",
1491 			 data.feat[0],
1492 			 data.feat[1],
1493 			 data.feat[2]);
1494 	return 0;
1495 }
1496 
1497 static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1498 					  struct kvm_device_attr *attr)
1499 {
1500 	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
1501 	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
1502 		return -EFAULT;
1503 
1504 	VM_EVENT(kvm, 3, "GET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1505 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1506 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1507 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1508 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1509 	VM_EVENT(kvm, 3, "GET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
1510 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1511 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1512 	VM_EVENT(kvm, 3, "GET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
1513 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1514 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1515 	VM_EVENT(kvm, 3, "GET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
1516 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1517 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1518 	VM_EVENT(kvm, 3, "GET: guest KM     subfunc 0x%16.16lx.%16.16lx",
1519 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1520 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1521 	VM_EVENT(kvm, 3, "GET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
1522 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1523 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1524 	VM_EVENT(kvm, 3, "GET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
1525 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1526 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1527 	VM_EVENT(kvm, 3, "GET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
1528 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1529 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1530 	VM_EVENT(kvm, 3, "GET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
1531 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1532 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1533 	VM_EVENT(kvm, 3, "GET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
1534 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1535 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1536 	VM_EVENT(kvm, 3, "GET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
1537 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1538 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1539 	VM_EVENT(kvm, 3, "GET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
1540 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1541 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1542 	VM_EVENT(kvm, 3, "GET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
1543 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1544 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1545 	VM_EVENT(kvm, 3, "GET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
1546 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1547 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
1548 	VM_EVENT(kvm, 3, "GET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
1549 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1550 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1551 	VM_EVENT(kvm, 3, "GET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1552 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1553 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1554 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1555 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
1556 	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1557 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1558 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1559 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1560 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
1561 
1562 	return 0;
1563 }
1564 
1565 static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
1566 					struct kvm_device_attr *attr)
1567 {
1568 	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
1569 	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
1570 		return -EFAULT;
1571 
1572 	VM_EVENT(kvm, 3, "GET: host  PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1573 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
1574 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
1575 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
1576 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
1577 	VM_EVENT(kvm, 3, "GET: host  PTFF   subfunc 0x%16.16lx.%16.16lx",
1578 		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
1579 		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
1580 	VM_EVENT(kvm, 3, "GET: host  KMAC   subfunc 0x%16.16lx.%16.16lx",
1581 		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
1582 		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
1583 	VM_EVENT(kvm, 3, "GET: host  KMC    subfunc 0x%16.16lx.%16.16lx",
1584 		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
1585 		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
1586 	VM_EVENT(kvm, 3, "GET: host  KM     subfunc 0x%16.16lx.%16.16lx",
1587 		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
1588 		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
1589 	VM_EVENT(kvm, 3, "GET: host  KIMD   subfunc 0x%16.16lx.%16.16lx",
1590 		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
1591 		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
1592 	VM_EVENT(kvm, 3, "GET: host  KLMD   subfunc 0x%16.16lx.%16.16lx",
1593 		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
1594 		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
1595 	VM_EVENT(kvm, 3, "GET: host  PCKMO  subfunc 0x%16.16lx.%16.16lx",
1596 		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
1597 		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
1598 	VM_EVENT(kvm, 3, "GET: host  KMCTR  subfunc 0x%16.16lx.%16.16lx",
1599 		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
1600 		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
1601 	VM_EVENT(kvm, 3, "GET: host  KMF    subfunc 0x%16.16lx.%16.16lx",
1602 		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
1603 		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
1604 	VM_EVENT(kvm, 3, "GET: host  KMO    subfunc 0x%16.16lx.%16.16lx",
1605 		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
1606 		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
1607 	VM_EVENT(kvm, 3, "GET: host  PCC    subfunc 0x%16.16lx.%16.16lx",
1608 		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
1609 		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
1610 	VM_EVENT(kvm, 3, "GET: host  PPNO   subfunc 0x%16.16lx.%16.16lx",
1611 		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
1612 		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
1613 	VM_EVENT(kvm, 3, "GET: host  KMA    subfunc 0x%16.16lx.%16.16lx",
1614 		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
1615 		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
1616 	VM_EVENT(kvm, 3, "GET: host  KDSA   subfunc 0x%16.16lx.%16.16lx",
1617 		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
1618 		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
1619 	VM_EVENT(kvm, 3, "GET: host  SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1620 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
1621 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
1622 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
1623 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
1624 	VM_EVENT(kvm, 3, "GET: host  DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1625 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
1626 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
1627 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
1628 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
1629 
1630 	return 0;
1631 }
1632 
1633 static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1634 {
1635 	int ret = -ENXIO;
1636 
1637 	switch (attr->attr) {
1638 	case KVM_S390_VM_CPU_PROCESSOR:
1639 		ret = kvm_s390_get_processor(kvm, attr);
1640 		break;
1641 	case KVM_S390_VM_CPU_MACHINE:
1642 		ret = kvm_s390_get_machine(kvm, attr);
1643 		break;
1644 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1645 		ret = kvm_s390_get_processor_feat(kvm, attr);
1646 		break;
1647 	case KVM_S390_VM_CPU_MACHINE_FEAT:
1648 		ret = kvm_s390_get_machine_feat(kvm, attr);
1649 		break;
1650 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1651 		ret = kvm_s390_get_processor_subfunc(kvm, attr);
1652 		break;
1653 	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1654 		ret = kvm_s390_get_machine_subfunc(kvm, attr);
1655 		break;
1656 	}
1657 	return ret;
1658 }
1659 
1660 static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1661 {
1662 	int ret;
1663 
1664 	switch (attr->group) {
1665 	case KVM_S390_VM_MEM_CTRL:
1666 		ret = kvm_s390_set_mem_control(kvm, attr);
1667 		break;
1668 	case KVM_S390_VM_TOD:
1669 		ret = kvm_s390_set_tod(kvm, attr);
1670 		break;
1671 	case KVM_S390_VM_CPU_MODEL:
1672 		ret = kvm_s390_set_cpu_model(kvm, attr);
1673 		break;
1674 	case KVM_S390_VM_CRYPTO:
1675 		ret = kvm_s390_vm_set_crypto(kvm, attr);
1676 		break;
1677 	case KVM_S390_VM_MIGRATION:
1678 		ret = kvm_s390_vm_set_migration(kvm, attr);
1679 		break;
1680 	default:
1681 		ret = -ENXIO;
1682 		break;
1683 	}
1684 
1685 	return ret;
1686 }
1687 
1688 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1689 {
1690 	int ret;
1691 
1692 	switch (attr->group) {
1693 	case KVM_S390_VM_MEM_CTRL:
1694 		ret = kvm_s390_get_mem_control(kvm, attr);
1695 		break;
1696 	case KVM_S390_VM_TOD:
1697 		ret = kvm_s390_get_tod(kvm, attr);
1698 		break;
1699 	case KVM_S390_VM_CPU_MODEL:
1700 		ret = kvm_s390_get_cpu_model(kvm, attr);
1701 		break;
1702 	case KVM_S390_VM_MIGRATION:
1703 		ret = kvm_s390_vm_get_migration(kvm, attr);
1704 		break;
1705 	default:
1706 		ret = -ENXIO;
1707 		break;
1708 	}
1709 
1710 	return ret;
1711 }
1712 
1713 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1714 {
1715 	int ret;
1716 
1717 	switch (attr->group) {
1718 	case KVM_S390_VM_MEM_CTRL:
1719 		switch (attr->attr) {
1720 		case KVM_S390_VM_MEM_ENABLE_CMMA:
1721 		case KVM_S390_VM_MEM_CLR_CMMA:
1722 			ret = sclp.has_cmma ? 0 : -ENXIO;
1723 			break;
1724 		case KVM_S390_VM_MEM_LIMIT_SIZE:
1725 			ret = 0;
1726 			break;
1727 		default:
1728 			ret = -ENXIO;
1729 			break;
1730 		}
1731 		break;
1732 	case KVM_S390_VM_TOD:
1733 		switch (attr->attr) {
1734 		case KVM_S390_VM_TOD_LOW:
1735 		case KVM_S390_VM_TOD_HIGH:
1736 			ret = 0;
1737 			break;
1738 		default:
1739 			ret = -ENXIO;
1740 			break;
1741 		}
1742 		break;
1743 	case KVM_S390_VM_CPU_MODEL:
1744 		switch (attr->attr) {
1745 		case KVM_S390_VM_CPU_PROCESSOR:
1746 		case KVM_S390_VM_CPU_MACHINE:
1747 		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1748 		case KVM_S390_VM_CPU_MACHINE_FEAT:
1749 		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1750 		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1751 			ret = 0;
1752 			break;
1753 		default:
1754 			ret = -ENXIO;
1755 			break;
1756 		}
1757 		break;
1758 	case KVM_S390_VM_CRYPTO:
1759 		switch (attr->attr) {
1760 		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1761 		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1762 		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1763 		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1764 			ret = 0;
1765 			break;
1766 		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
1767 		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
1768 			ret = ap_instructions_available() ? 0 : -ENXIO;
1769 			break;
1770 		default:
1771 			ret = -ENXIO;
1772 			break;
1773 		}
1774 		break;
1775 	case KVM_S390_VM_MIGRATION:
1776 		ret = 0;
1777 		break;
1778 	default:
1779 		ret = -ENXIO;
1780 		break;
1781 	}
1782 
1783 	return ret;
1784 }
1785 
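/*
 * Read the storage keys for a range of guest frames into a user buffer.
 * Returns KVM_S390_GET_SKEYS_NONE if the guest does not use storage keys.
 */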
1786 static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1787 {
1788 	uint8_t *keys;
1789 	uint64_t hva;
1790 	int srcu_idx, i, r = 0;
1791 
1792 	if (args->flags != 0)
1793 		return -EINVAL;
1794 
1795 	/* Is this guest using storage keys? */
1796 	if (!mm_uses_skeys(current->mm))
1797 		return KVM_S390_GET_SKEYS_NONE;
1798 
1799 	/* Enforce sane limit on memory allocation */
1800 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1801 		return -EINVAL;
1802 
1803 	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
1804 	if (!keys)
1805 		return -ENOMEM;
1806 
1807 	down_read(&current->mm->mmap_sem);
1808 	srcu_idx = srcu_read_lock(&kvm->srcu);
1809 	for (i = 0; i < args->count; i++) {
1810 		hva = gfn_to_hva(kvm, args->start_gfn + i);
1811 		if (kvm_is_error_hva(hva)) {
1812 			r = -EFAULT;
1813 			break;
1814 		}
1815 
1816 		r = get_guest_storage_key(current->mm, hva, &keys[i]);
1817 		if (r)
1818 			break;
1819 	}
1820 	srcu_read_unlock(&kvm->srcu, srcu_idx);
1821 	up_read(&current->mm->mmap_sem);
1822 
1823 	if (!r) {
1824 		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
1825 				 sizeof(uint8_t) * args->count);
1826 		if (r)
1827 			r = -EFAULT;
1828 	}
1829 
1830 	kvfree(keys);
1831 	return r;
1832 }
1833 
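/*
 * Write user-provided storage keys for a range of guest frames. Storage
 * key handling is enabled on demand; if setting a key faults, the fault
 * is resolved with fixup_user_fault() and the access is retried.
 */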
1834 static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1835 {
1836 	uint8_t *keys;
1837 	uint64_t hva;
1838 	int srcu_idx, i, r = 0;
1839 	bool unlocked;
1840 
1841 	if (args->flags != 0)
1842 		return -EINVAL;
1843 
1844 	/* Enforce sane limit on memory allocation */
1845 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1846 		return -EINVAL;
1847 
1848 	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
1849 	if (!keys)
1850 		return -ENOMEM;
1851 
1852 	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
1853 			   sizeof(uint8_t) * args->count);
1854 	if (r) {
1855 		r = -EFAULT;
1856 		goto out;
1857 	}
1858 
1859 	/* Enable storage key handling for the guest */
1860 	r = s390_enable_skey();
1861 	if (r)
1862 		goto out;
1863 
1864 	i = 0;
1865 	down_read(&current->mm->mmap_sem);
1866 	srcu_idx = srcu_read_lock(&kvm->srcu);
1867 	while (i < args->count) {
1868 		unlocked = false;
1869 		hva = gfn_to_hva(kvm, args->start_gfn + i);
1870 		if (kvm_is_error_hva(hva)) {
1871 			r = -EFAULT;
1872 			break;
1873 		}
1874 
1875 		/* Lowest order bit is reserved */
1876 		if (keys[i] & 0x01) {
1877 			r = -EINVAL;
1878 			break;
1879 		}
1880 
1881 		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
1882 		if (r) {
1883 			r = fixup_user_fault(current, current->mm, hva,
1884 					     FAULT_FLAG_WRITE, &unlocked);
1885 			if (r)
1886 				break;
1887 		}
1888 		if (!r)
1889 			i++;
1890 	}
1891 	srcu_read_unlock(&kvm->srcu, srcu_idx);
1892 	up_read(&current->mm->mmap_sem);
1893 out:
1894 	kvfree(keys);
1895 	return r;
1896 }
1897 
1898 /*
1899  * Base address and length must be sent at the start of each block, therefore
1900  * it's cheaper to send some clean data, as long as it's less than the size of
1901  * two longs.
1902  */
1903 #define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
1904 /* for consistency */
1905 #define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
1906 
1907 /*
1908  * Similar to gfn_to_memslot, but returns the index of a memslot also when the
1909  * address falls in a hole. In that case the index of one of the memslots
1910  * bordering the hole is returned.
1911  */
1912 static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
1913 {
1914 	int start = 0, end = slots->used_slots;
1915 	int slot = atomic_read(&slots->lru_slot);
1916 	struct kvm_memory_slot *memslots = slots->memslots;
1917 
1918 	if (gfn >= memslots[slot].base_gfn &&
1919 	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
1920 		return slot;
1921 
1922 	while (start < end) {
1923 		slot = start + (end - start) / 2;
1924 
1925 		if (gfn >= memslots[slot].base_gfn)
1926 			end = slot;
1927 		else
1928 			start = slot + 1;
1929 	}
1930 
1931 	if (gfn >= memslots[start].base_gfn &&
1932 	    gfn < memslots[start].base_gfn + memslots[start].npages) {
1933 		atomic_set(&slots->lru_slot, start);
1934 	}
1935 
1936 	return start;
1937 }
1938 
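/*
 * Peek mode: read the CMMA values of consecutive guest frames starting
 * at args->start_gfn without touching the per-page dirty state.
 */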
1939 static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
1940 			      u8 *res, unsigned long bufsize)
1941 {
1942 	unsigned long pgstev, hva, cur_gfn = args->start_gfn;
1943 
1944 	args->count = 0;
1945 	while (args->count < bufsize) {
1946 		hva = gfn_to_hva(kvm, cur_gfn);
1947 		/*
1948 		 * We return an error if the first value was invalid, but we
1949 		 * return successfully if at least one value was copied.
1950 		 */
1951 		if (kvm_is_error_hva(hva))
1952 			return args->count ? 0 : -EFAULT;
1953 		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
1954 			pgstev = 0;
1955 		res[args->count++] = (pgstev >> 24) & 0x43;
1956 		cur_gfn++;
1957 	}
1958 
1959 	return 0;
1960 }
1961 
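/*
 * Return the guest frame number of the next page with a set bit in the
 * per-memslot CMMA dirty bitmap, starting the search at cur_gfn. The
 * memslots array is sorted by descending base_gfn.
 */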
1962 static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
1963 					      unsigned long cur_gfn)
1964 {
1965 	int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
1966 	struct kvm_memory_slot *ms = slots->memslots + slotidx;
1967 	unsigned long ofs = cur_gfn - ms->base_gfn;
1968 
1969 	if (ms->base_gfn + ms->npages <= cur_gfn) {
1970 		slotidx--;
1971 		/* If we are above the highest slot, wrap around */
1972 		if (slotidx < 0)
1973 			slotidx = slots->used_slots - 1;
1974 
1975 		ms = slots->memslots + slotidx;
1976 		ofs = 0;
1977 	}
1978 	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
1979 	while ((slotidx > 0) && (ofs >= ms->npages)) {
1980 		slotidx--;
1981 		ms = slots->memslots + slotidx;
1982 		ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
1983 	}
1984 	return ms->base_gfn + ofs;
1985 }
1986 
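/*
 * Migration mode: starting from the next dirty page, save the CMMA values
 * and clear the corresponding dirty bits. Stops when the buffer is full,
 * the end of memory is reached, or the next dirty bit is more than
 * KVM_S390_MAX_BIT_DISTANCE frames away.
 */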
1987 static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
1988 			     u8 *res, unsigned long bufsize)
1989 {
1990 	unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
1991 	struct kvm_memslots *slots = kvm_memslots(kvm);
1992 	struct kvm_memory_slot *ms;
1993 
1994 	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
1995 	ms = gfn_to_memslot(kvm, cur_gfn);
1996 	args->count = 0;
1997 	args->start_gfn = cur_gfn;
1998 	if (!ms)
1999 		return 0;
2000 	next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2001 	mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;
2002 
2003 	while (args->count < bufsize) {
2004 		hva = gfn_to_hva(kvm, cur_gfn);
2005 		if (kvm_is_error_hva(hva))
2006 			return 0;
2007 		/* Decrement only if we actually flipped the bit to 0 */
2008 		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
2009 			atomic64_dec(&kvm->arch.cmma_dirty_pages);
2010 		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2011 			pgstev = 0;
2012 		/* Save the value */
2013 		res[args->count++] = (pgstev >> 24) & 0x43;
2014 		/* If the next bit is too far away, stop. */
2015 		if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
2016 			return 0;
2017 		/* If we reached the previous "next", find the next one */
2018 		if (cur_gfn == next_gfn)
2019 			next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2020 		/* Reached the end of memory or of the buffer, stop */
2021 		if ((next_gfn >= mem_end) ||
2022 		    (next_gfn - args->start_gfn >= bufsize))
2023 			return 0;
2024 		cur_gfn++;
2025 		/* Reached the end of the current memslot, take the next one. */
2026 		if (cur_gfn - ms->base_gfn >= ms->npages) {
2027 			ms = gfn_to_memslot(kvm, cur_gfn);
2028 			if (!ms)
2029 				return 0;
2030 		}
2031 	}
2032 	return 0;
2033 }
2034 
2035 /*
2036  * This function searches for the next page with dirty CMMA attributes, and
2037  * saves the attributes in the buffer up to either the end of the buffer or
2038  * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
2039  * no trailing clean bytes are saved.
2040  * In case no dirty bits were found, or if CMMA was not enabled or used, the
2041  * output buffer will indicate 0 as length.
2042  */
2043 static int kvm_s390_get_cmma_bits(struct kvm *kvm,
2044 				  struct kvm_s390_cmma_log *args)
2045 {
2046 	unsigned long bufsize;
2047 	int srcu_idx, peek, ret;
2048 	u8 *values;
2049 
2050 	if (!kvm->arch.use_cmma)
2051 		return -ENXIO;
2052 	/* Invalid/unsupported flags were specified */
2053 	if (args->flags & ~KVM_S390_CMMA_PEEK)
2054 		return -EINVAL;
2055 	/* Migration mode query, and we are not doing a migration */
2056 	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
2057 	if (!peek && !kvm->arch.migration_mode)
2058 		return -EINVAL;
2059 	/* CMMA is disabled or was not used, or the buffer has length zero */
2060 	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
2061 	if (!bufsize || !kvm->mm->context.uses_cmm) {
2062 		memset(args, 0, sizeof(*args));
2063 		return 0;
2064 	}
2065 	/* We are not peeking, and there are no dirty pages */
2066 	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
2067 		memset(args, 0, sizeof(*args));
2068 		return 0;
2069 	}
2070 
2071 	values = vmalloc(bufsize);
2072 	if (!values)
2073 		return -ENOMEM;
2074 
2075 	down_read(&kvm->mm->mmap_sem);
2076 	srcu_idx = srcu_read_lock(&kvm->srcu);
2077 	if (peek)
2078 		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
2079 	else
2080 		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
2081 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2082 	up_read(&kvm->mm->mmap_sem);
2083 
2084 	if (kvm->arch.migration_mode)
2085 		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2086 	else
2087 		args->remaining = 0;
2088 
2089 	if (copy_to_user((void __user *)args->values, values, args->count))
2090 		ret = -EFAULT;
2091 
2092 	vfree(values);
2093 	return ret;
2094 }
2095 
2096 /*
2097  * This function sets the CMMA attributes for the given pages. If the input
2098  * buffer has zero length, no action is taken, otherwise the attributes are
2099  * set and the mm->context.uses_cmm flag is set.
2100  */
2101 static int kvm_s390_set_cmma_bits(struct kvm *kvm,
2102 				  const struct kvm_s390_cmma_log *args)
2103 {
2104 	unsigned long hva, mask, pgstev, i;
2105 	uint8_t *bits;
2106 	int srcu_idx, r = 0;
2107 
2108 	mask = args->mask;
2109 
2110 	if (!kvm->arch.use_cmma)
2111 		return -ENXIO;
2112 	/* invalid/unsupported flags */
2113 	if (args->flags != 0)
2114 		return -EINVAL;
2115 	/* Enforce sane limit on memory allocation */
2116 	if (args->count > KVM_S390_CMMA_SIZE_MAX)
2117 		return -EINVAL;
2118 	/* Nothing to do */
2119 	if (args->count == 0)
2120 		return 0;
2121 
2122 	bits = vmalloc(array_size(sizeof(*bits), args->count));
2123 	if (!bits)
2124 		return -ENOMEM;
2125 
2126 	r = copy_from_user(bits, (void __user *)args->values, args->count);
2127 	if (r) {
2128 		r = -EFAULT;
2129 		goto out;
2130 	}
2131 
2132 	down_read(&kvm->mm->mmap_sem);
2133 	srcu_idx = srcu_read_lock(&kvm->srcu);
2134 	for (i = 0; i < args->count; i++) {
2135 		hva = gfn_to_hva(kvm, args->start_gfn + i);
2136 		if (kvm_is_error_hva(hva)) {
2137 			r = -EFAULT;
2138 			break;
2139 		}
2140 
2141 		pgstev = bits[i];
2142 		pgstev = pgstev << 24;
2143 		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
2144 		set_pgste_bits(kvm->mm, hva, mask, pgstev);
2145 	}
2146 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2147 	up_read(&kvm->mm->mmap_sem);
2148 
2149 	if (!kvm->mm->context.uses_cmm) {
2150 		down_write(&kvm->mm->mmap_sem);
2151 		kvm->mm->context.uses_cmm = 1;
2152 		up_write(&kvm->mm->mmap_sem);
2153 	}
2154 out:
2155 	vfree(bits);
2156 	return r;
2157 }
2158 
2159 long kvm_arch_vm_ioctl(struct file *filp,
2160 		       unsigned int ioctl, unsigned long arg)
2161 {
2162 	struct kvm *kvm = filp->private_data;
2163 	void __user *argp = (void __user *)arg;
2164 	struct kvm_device_attr attr;
2165 	int r;
2166 
2167 	switch (ioctl) {
2168 	case KVM_S390_INTERRUPT: {
2169 		struct kvm_s390_interrupt s390int;
2170 
2171 		r = -EFAULT;
2172 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
2173 			break;
2174 		r = kvm_s390_inject_vm(kvm, &s390int);
2175 		break;
2176 	}
2177 	case KVM_CREATE_IRQCHIP: {
2178 		struct kvm_irq_routing_entry routing;
2179 
2180 		r = -EINVAL;
2181 		if (kvm->arch.use_irqchip) {
2182 			/* Set up dummy routing. */
2183 			memset(&routing, 0, sizeof(routing));
2184 			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
2185 		}
2186 		break;
2187 	}
2188 	case KVM_SET_DEVICE_ATTR: {
2189 		r = -EFAULT;
2190 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2191 			break;
2192 		r = kvm_s390_vm_set_attr(kvm, &attr);
2193 		break;
2194 	}
2195 	case KVM_GET_DEVICE_ATTR: {
2196 		r = -EFAULT;
2197 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2198 			break;
2199 		r = kvm_s390_vm_get_attr(kvm, &attr);
2200 		break;
2201 	}
2202 	case KVM_HAS_DEVICE_ATTR: {
2203 		r = -EFAULT;
2204 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2205 			break;
2206 		r = kvm_s390_vm_has_attr(kvm, &attr);
2207 		break;
2208 	}
2209 	case KVM_S390_GET_SKEYS: {
2210 		struct kvm_s390_skeys args;
2211 
2212 		r = -EFAULT;
2213 		if (copy_from_user(&args, argp,
2214 				   sizeof(struct kvm_s390_skeys)))
2215 			break;
2216 		r = kvm_s390_get_skeys(kvm, &args);
2217 		break;
2218 	}
2219 	case KVM_S390_SET_SKEYS: {
2220 		struct kvm_s390_skeys args;
2221 
2222 		r = -EFAULT;
2223 		if (copy_from_user(&args, argp,
2224 				   sizeof(struct kvm_s390_skeys)))
2225 			break;
2226 		r = kvm_s390_set_skeys(kvm, &args);
2227 		break;
2228 	}
2229 	case KVM_S390_GET_CMMA_BITS: {
2230 		struct kvm_s390_cmma_log args;
2231 
2232 		r = -EFAULT;
2233 		if (copy_from_user(&args, argp, sizeof(args)))
2234 			break;
2235 		mutex_lock(&kvm->slots_lock);
2236 		r = kvm_s390_get_cmma_bits(kvm, &args);
2237 		mutex_unlock(&kvm->slots_lock);
2238 		if (!r) {
2239 			r = copy_to_user(argp, &args, sizeof(args));
2240 			if (r)
2241 				r = -EFAULT;
2242 		}
2243 		break;
2244 	}
2245 	case KVM_S390_SET_CMMA_BITS: {
2246 		struct kvm_s390_cmma_log args;
2247 
2248 		r = -EFAULT;
2249 		if (copy_from_user(&args, argp, sizeof(args)))
2250 			break;
2251 		mutex_lock(&kvm->slots_lock);
2252 		r = kvm_s390_set_cmma_bits(kvm, &args);
2253 		mutex_unlock(&kvm->slots_lock);
2254 		break;
2255 	}
2256 	default:
2257 		r = -ENOTTY;
2258 	}
2259 
2260 	return r;
2261 }
2262 
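/*
 * Query the AP configuration to find out whether the AP extended
 * addressing (APXA) facility is installed. Returns 0 if the AP
 * instructions are unavailable or the query fails.
 */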
2263 static int kvm_s390_apxa_installed(void)
2264 {
2265 	struct ap_config_info info;
2266 
2267 	if (ap_instructions_available()) {
2268 		if (ap_qci(&info) == 0)
2269 			return info.apxa;
2270 	}
2271 
2272 	return 0;
2273 }
2274 
2275 /*
2276  * The format of the crypto control block (CRYCB) is specified in the 3 low
2277  * order bits of the CRYCB designation (CRYCBD) field as follows:
2278  * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
2279  *	     AP extended addressing (APXA) facility is installed.
2280  * Format 1: The APXA facility is not installed but the MSAX3 facility is.
2281  * Format 2: Both the APXA and MSAX3 facilities are installed.
2282  */
2283 static void kvm_s390_set_crycb_format(struct kvm *kvm)
2284 {
2285 	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
2286 
2287 	/* Clear the CRYCB format bits - i.e., set format 0 by default */
2288 	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
2289 
2290 	/* Check whether MSAX3 is installed */
2291 	if (!test_kvm_facility(kvm, 76))
2292 		return;
2293 
2294 	if (kvm_s390_apxa_installed())
2295 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
2296 	else
2297 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
2298 }
2299 
2300 void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
2301 			       unsigned long *aqm, unsigned long *adm)
2302 {
2303 	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
2304 
2305 	mutex_lock(&kvm->lock);
2306 	kvm_s390_vcpu_block_all(kvm);
2307 
2308 	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
2309 	case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
2310 		memcpy(crycb->apcb1.apm, apm, 32);
2311 		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
2312 			 apm[0], apm[1], apm[2], apm[3]);
2313 		memcpy(crycb->apcb1.aqm, aqm, 32);
2314 		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
2315 			 aqm[0], aqm[1], aqm[2], aqm[3]);
2316 		memcpy(crycb->apcb1.adm, adm, 32);
2317 		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
2318 			 adm[0], adm[1], adm[2], adm[3]);
2319 		break;
2320 	case CRYCB_FORMAT1:
2321 	case CRYCB_FORMAT0: /* fall through - both use APCB0 */
2322 		memcpy(crycb->apcb0.apm, apm, 8);
2323 		memcpy(crycb->apcb0.aqm, aqm, 2);
2324 		memcpy(crycb->apcb0.adm, adm, 2);
2325 		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
2326 			 apm[0], *((unsigned short *)aqm),
2327 			 *((unsigned short *)adm));
2328 		break;
2329 	default:	/* Cannot happen */
2330 		break;
2331 	}
2332 
2333 	/* recreate the shadow crycb for each vcpu */
2334 	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
2335 	kvm_s390_vcpu_unblock_all(kvm);
2336 	mutex_unlock(&kvm->lock);
2337 }
2338 EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
2339 
2340 void kvm_arch_crypto_clear_masks(struct kvm *kvm)
2341 {
2342 	mutex_lock(&kvm->lock);
2343 	kvm_s390_vcpu_block_all(kvm);
2344 
2345 	memset(&kvm->arch.crypto.crycb->apcb0, 0,
2346 	       sizeof(kvm->arch.crypto.crycb->apcb0));
2347 	memset(&kvm->arch.crypto.crycb->apcb1, 0,
2348 	       sizeof(kvm->arch.crypto.crycb->apcb1));
2349 
2350 	VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
2351 	/* recreate the shadow crycb for each vcpu */
2352 	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
2353 	kvm_s390_vcpu_unblock_all(kvm);
2354 	mutex_unlock(&kvm->lock);
2355 }
2356 EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
2357 
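/*
 * Derive the initial guest cpuid from the host cpuid, overwriting the
 * version field with 0xff.
 */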
2358 static u64 kvm_s390_get_initial_cpuid(void)
2359 {
2360 	struct cpuid cpuid;
2361 
2362 	get_cpu_id(&cpuid);
2363 	cpuid.version = 0xff;
2364 	return *((u64 *) &cpuid);
2365 }
2366 
2367 static void kvm_s390_crypto_init(struct kvm *kvm)
2368 {
2369 	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
2370 	kvm_s390_set_crycb_format(kvm);
2371 
2372 	if (!test_kvm_facility(kvm, 76))
2373 		return;
2374 
2375 	/* Enable AES/DEA protected key functions by default */
2376 	kvm->arch.crypto.aes_kw = 1;
2377 	kvm->arch.crypto.dea_kw = 1;
2378 	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
2379 			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
2380 	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
2381 			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
2382 }
2383 
2384 static void sca_dispose(struct kvm *kvm)
2385 {
2386 	if (kvm->arch.use_esca)
2387 		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
2388 	else
2389 		free_page((unsigned long)(kvm->arch.sca));
2390 	kvm->arch.sca = NULL;
2391 }
2392 
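/*
 * Set up a new VM: allocate the basic SCA at a staggered offset, register
 * the debug feature, build the facility mask/list and CPU model, set up
 * crypto and interrupt state and, unless this is a ucontrol VM, create
 * the guest address space (gmap).
 */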
2393 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
2394 {
2395 	gfp_t alloc_flags = GFP_KERNEL;
2396 	int i, rc;
2397 	char debug_name[16];
2398 	static unsigned long sca_offset;
2399 
2400 	rc = -EINVAL;
2401 #ifdef CONFIG_KVM_S390_UCONTROL
2402 	if (type & ~KVM_VM_S390_UCONTROL)
2403 		goto out_err;
2404 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
2405 		goto out_err;
2406 #else
2407 	if (type)
2408 		goto out_err;
2409 #endif
2410 
2411 	rc = s390_enable_sie();
2412 	if (rc)
2413 		goto out_err;
2414 
2415 	rc = -ENOMEM;
2416 
2417 	if (!sclp.has_64bscao)
2418 		alloc_flags |= GFP_DMA;
2419 	rwlock_init(&kvm->arch.sca_lock);
2420 	/* start with basic SCA */
2421 	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
2422 	if (!kvm->arch.sca)
2423 		goto out_err;
2424 	mutex_lock(&kvm_lock);
2425 	sca_offset += 16;
2426 	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
2427 		sca_offset = 0;
2428 	kvm->arch.sca = (struct bsca_block *)
2429 			((char *) kvm->arch.sca + sca_offset);
2430 	mutex_unlock(&kvm_lock);
2431 
2432 	sprintf(debug_name, "kvm-%u", current->pid);
2433 
2434 	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
2435 	if (!kvm->arch.dbf)
2436 		goto out_err;
2437 
2438 	BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
2439 	kvm->arch.sie_page2 =
2440 	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
2441 	if (!kvm->arch.sie_page2)
2442 		goto out_err;
2443 
2444 	kvm->arch.sie_page2->kvm = kvm;
2445 	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
2446 
2447 	for (i = 0; i < kvm_s390_fac_size(); i++) {
2448 		kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
2449 					      (kvm_s390_fac_base[i] |
2450 					       kvm_s390_fac_ext[i]);
2451 		kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
2452 					      kvm_s390_fac_base[i];
2453 	}
2454 	kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
2455 
2456 	/* we are always in czam mode - even on pre-z14 machines */
2457 	set_kvm_facility(kvm->arch.model.fac_mask, 138);
2458 	set_kvm_facility(kvm->arch.model.fac_list, 138);
2459 	/* we emulate STHYI in kvm */
2460 	set_kvm_facility(kvm->arch.model.fac_mask, 74);
2461 	set_kvm_facility(kvm->arch.model.fac_list, 74);
2462 	if (MACHINE_HAS_TLB_GUEST) {
2463 		set_kvm_facility(kvm->arch.model.fac_mask, 147);
2464 		set_kvm_facility(kvm->arch.model.fac_list, 147);
2465 	}
2466 
2467 	if (css_general_characteristics.aiv && test_facility(65))
2468 		set_kvm_facility(kvm->arch.model.fac_mask, 65);
2469 
2470 	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
2471 	kvm->arch.model.ibc = sclp.ibc & 0x0fff;
2472 
2473 	kvm_s390_crypto_init(kvm);
2474 
2475 	mutex_init(&kvm->arch.float_int.ais_lock);
2476 	spin_lock_init(&kvm->arch.float_int.lock);
2477 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
2478 		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
2479 	init_waitqueue_head(&kvm->arch.ipte_wq);
2480 	mutex_init(&kvm->arch.ipte_mutex);
2481 
2482 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
2483 	VM_EVENT(kvm, 3, "vm created with type %lu", type);
2484 
2485 	if (type & KVM_VM_S390_UCONTROL) {
2486 		kvm->arch.gmap = NULL;
2487 		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
2488 	} else {
2489 		if (sclp.hamax == U64_MAX)
2490 			kvm->arch.mem_limit = TASK_SIZE_MAX;
2491 		else
2492 			kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
2493 						    sclp.hamax + 1);
2494 		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
2495 		if (!kvm->arch.gmap)
2496 			goto out_err;
2497 		kvm->arch.gmap->private = kvm;
2498 		kvm->arch.gmap->pfault_enabled = 0;
2499 	}
2500 
2501 	kvm->arch.use_pfmfi = sclp.has_pfmfi;
2502 	kvm->arch.use_skf = sclp.has_skey;
2503 	spin_lock_init(&kvm->arch.start_stop_lock);
2504 	kvm_s390_vsie_init(kvm);
2505 	kvm_s390_gisa_init(kvm);
2506 	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
2507 
2508 	return 0;
2509 out_err:
2510 	free_page((unsigned long)kvm->arch.sie_page2);
2511 	debug_unregister(kvm->arch.dbf);
2512 	sca_dispose(kvm);
2513 	KVM_EVENT(3, "creation of vm failed: %d", rc);
2514 	return rc;
2515 }
2516 
2517 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2518 {
2519 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
2520 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
2521 	kvm_s390_clear_local_irqs(vcpu);
2522 	kvm_clear_async_pf_completion_queue(vcpu);
2523 	if (!kvm_is_ucontrol(vcpu->kvm))
2524 		sca_del_vcpu(vcpu);
2525 
2526 	if (kvm_is_ucontrol(vcpu->kvm))
2527 		gmap_remove(vcpu->arch.gmap);
2528 
2529 	if (vcpu->kvm->arch.use_cmma)
2530 		kvm_s390_vcpu_unsetup_cmma(vcpu);
2531 	free_page((unsigned long)(vcpu->arch.sie_block));
2532 
2533 	kvm_vcpu_uninit(vcpu);
2534 	kmem_cache_free(kvm_vcpu_cache, vcpu);
2535 }
2536 
2537 static void kvm_free_vcpus(struct kvm *kvm)
2538 {
2539 	unsigned int i;
2540 	struct kvm_vcpu *vcpu;
2541 
2542 	kvm_for_each_vcpu(i, vcpu, kvm)
2543 		kvm_arch_vcpu_destroy(vcpu);
2544 
2545 	mutex_lock(&kvm->lock);
2546 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
2547 		kvm->vcpus[i] = NULL;
2548 
2549 	atomic_set(&kvm->online_vcpus, 0);
2550 	mutex_unlock(&kvm->lock);
2551 }
2552 
2553 void kvm_arch_destroy_vm(struct kvm *kvm)
2554 {
2555 	kvm_free_vcpus(kvm);
2556 	sca_dispose(kvm);
2557 	debug_unregister(kvm->arch.dbf);
2558 	kvm_s390_gisa_destroy(kvm);
2559 	free_page((unsigned long)kvm->arch.sie_page2);
2560 	if (!kvm_is_ucontrol(kvm))
2561 		gmap_remove(kvm->arch.gmap);
2562 	kvm_s390_destroy_adapters(kvm);
2563 	kvm_s390_clear_float_irqs(kvm);
2564 	kvm_s390_vsie_destroy(kvm);
2565 	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
2566 }
2567 
2568 /* Section: vcpu related */
2569 static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
2570 {
2571 	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
2572 	if (!vcpu->arch.gmap)
2573 		return -ENOMEM;
2574 	vcpu->arch.gmap->private = vcpu->kvm;
2575 
2576 	return 0;
2577 }
2578 
2579 static void sca_del_vcpu(struct kvm_vcpu *vcpu)
2580 {
2581 	if (!kvm_s390_use_sca_entries())
2582 		return;
2583 	read_lock(&vcpu->kvm->arch.sca_lock);
2584 	if (vcpu->kvm->arch.use_esca) {
2585 		struct esca_block *sca = vcpu->kvm->arch.sca;
2586 
2587 		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
2588 		sca->cpu[vcpu->vcpu_id].sda = 0;
2589 	} else {
2590 		struct bsca_block *sca = vcpu->kvm->arch.sca;
2591 
2592 		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
2593 		sca->cpu[vcpu->vcpu_id].sda = 0;
2594 	}
2595 	read_unlock(&vcpu->kvm->arch.sca_lock);
2596 }
2597 
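/*
 * Publish the VCPU's SIE block in the SCA. Without SCA entries only the
 * SCA origin is set, which is still needed for ipte control.
 */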
2598 static void sca_add_vcpu(struct kvm_vcpu *vcpu)
2599 {
2600 	if (!kvm_s390_use_sca_entries()) {
2601 		struct bsca_block *sca = vcpu->kvm->arch.sca;
2602 
2603 		/* we still need the basic sca for the ipte control */
2604 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2605 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
2606 		return;
2607 	}
2608 	read_lock(&vcpu->kvm->arch.sca_lock);
2609 	if (vcpu->kvm->arch.use_esca) {
2610 		struct esca_block *sca = vcpu->kvm->arch.sca;
2611 
2612 		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
2613 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2614 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
2615 		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
2616 		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
2617 	} else {
2618 		struct bsca_block *sca = vcpu->kvm->arch.sca;
2619 
2620 		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
2621 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2622 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
2623 		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
2624 	}
2625 	read_unlock(&vcpu->kvm->arch.sca_lock);
2626 }
2627 
2628 /* Basic SCA to Extended SCA data copy routines */
2629 static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2630 {
2631 	d->sda = s->sda;
2632 	d->sigp_ctrl.c = s->sigp_ctrl.c;
2633 	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2634 }
2635 
2636 static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2637 {
2638 	int i;
2639 
2640 	d->ipte_control = s->ipte_control;
2641 	d->mcn[0] = s->mcn;
2642 	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2643 		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2644 }
2645 
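/*
 * Replace the basic SCA with an extended SCA. All VCPUs are blocked while
 * the entries are copied and every SIE control block is rewired to the
 * new SCA origin.
 */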
2646 static int sca_switch_to_extended(struct kvm *kvm)
2647 {
2648 	struct bsca_block *old_sca = kvm->arch.sca;
2649 	struct esca_block *new_sca;
2650 	struct kvm_vcpu *vcpu;
2651 	unsigned int vcpu_idx;
2652 	u32 scaol, scaoh;
2653 
2654 	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
2655 	if (!new_sca)
2656 		return -ENOMEM;
2657 
2658 	scaoh = (u32)((u64)(new_sca) >> 32);
2659 	scaol = (u32)(u64)(new_sca) & ~0x3fU;
2660 
2661 	kvm_s390_vcpu_block_all(kvm);
2662 	write_lock(&kvm->arch.sca_lock);
2663 
2664 	sca_copy_b_to_e(new_sca, old_sca);
2665 
2666 	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2667 		vcpu->arch.sie_block->scaoh = scaoh;
2668 		vcpu->arch.sie_block->scaol = scaol;
2669 		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
2670 	}
2671 	kvm->arch.sca = new_sca;
2672 	kvm->arch.use_esca = 1;
2673 
2674 	write_unlock(&kvm->arch.sca_lock);
2675 	kvm_s390_vcpu_unblock_all(kvm);
2676 
2677 	free_page((unsigned long)old_sca);
2678 
2679 	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2680 		 old_sca, kvm->arch.sca);
2681 	return 0;
2682 }
2683 
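/*
 * Check whether a VCPU with the given id fits into the current SCA,
 * switching from the basic to the extended SCA on demand.
 */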
2684 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2685 {
2686 	int rc;
2687 
2688 	if (!kvm_s390_use_sca_entries()) {
2689 		if (id < KVM_MAX_VCPUS)
2690 			return true;
2691 		return false;
2692 	}
2693 	if (id < KVM_S390_BSCA_CPU_SLOTS)
2694 		return true;
2695 	if (!sclp.has_esca || !sclp.has_64bscao)
2696 		return false;
2697 
2698 	mutex_lock(&kvm->lock);
2699 	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2700 	mutex_unlock(&kvm->lock);
2701 
2702 	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
2703 }
2704 
2705 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
2706 {
2707 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2708 	kvm_clear_async_pf_completion_queue(vcpu);
2709 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
2710 				    KVM_SYNC_GPRS |
2711 				    KVM_SYNC_ACRS |
2712 				    KVM_SYNC_CRS |
2713 				    KVM_SYNC_ARCH0 |
2714 				    KVM_SYNC_PFAULT;
2715 	kvm_s390_set_prefix(vcpu, 0);
2716 	if (test_kvm_facility(vcpu->kvm, 64))
2717 		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
2718 	if (test_kvm_facility(vcpu->kvm, 82))
2719 		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
2720 	if (test_kvm_facility(vcpu->kvm, 133))
2721 		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
2722 	if (test_kvm_facility(vcpu->kvm, 156))
2723 		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
2724 	/* fprs can be synchronized via vrs, even if the guest has no vx. With
2725 	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
2726 	 */
2727 	if (MACHINE_HAS_VX)
2728 		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
2729 	else
2730 		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
2731 
2732 	if (kvm_is_ucontrol(vcpu->kvm))
2733 		return __kvm_ucontrol_vcpu_init(vcpu);
2734 
2735 	return 0;
2736 }
2737 
2738 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2739 static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2740 {
2741 	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
2742 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
2743 	vcpu->arch.cputm_start = get_tod_clock_fast();
2744 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
2745 }
2746 
2747 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2748 static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2749 {
2750 	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
2751 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
2752 	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2753 	vcpu->arch.cputm_start = 0;
2754 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
2755 }
2756 
2757 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2758 static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2759 {
2760 	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
2761 	vcpu->arch.cputm_enabled = true;
2762 	__start_cpu_timer_accounting(vcpu);
2763 }
2764 
2765 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2766 static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2767 {
2768 	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
2769 	__stop_cpu_timer_accounting(vcpu);
2770 	vcpu->arch.cputm_enabled = false;
2771 }
2772 
2773 static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2774 {
2775 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2776 	__enable_cpu_timer_accounting(vcpu);
2777 	preempt_enable();
2778 }
2779 
2780 static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2781 {
2782 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2783 	__disable_cpu_timer_accounting(vcpu);
2784 	preempt_enable();
2785 }
2786 
2787 /* set the cpu timer - may only be called from the VCPU thread itself */
2788 void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
2789 {
2790 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2791 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
2792 	if (vcpu->arch.cputm_enabled)
2793 		vcpu->arch.cputm_start = get_tod_clock_fast();
2794 	vcpu->arch.sie_block->cputm = cputm;
2795 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
2796 	preempt_enable();
2797 }
2798 
2799 /* update and get the cpu timer - can also be called from other VCPU threads */
2800 __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
2801 {
2802 	unsigned int seq;
2803 	__u64 value;
2804 
2805 	if (unlikely(!vcpu->arch.cputm_enabled))
2806 		return vcpu->arch.sie_block->cputm;
2807 
2808 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2809 	do {
2810 		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
2811 		/*
2812 		 * If the writer would ever execute a read in the critical
2813 		 * section, e.g. in irq context, we have a deadlock.
2814 		 */
2815 		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
2816 		value = vcpu->arch.sie_block->cputm;
2817 		/* if cputm_start is 0, accounting is being started/stopped */
2818 		if (likely(vcpu->arch.cputm_start))
2819 			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2820 	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
2821 	preempt_enable();
2822 	return value;
2823 }
2824 
2825 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2826 {
2828 	gmap_enable(vcpu->arch.enabled_gmap);
2829 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
2830 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
2831 		__start_cpu_timer_accounting(vcpu);
2832 	vcpu->cpu = cpu;
2833 }
2834 
2835 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2836 {
2837 	vcpu->cpu = -1;
2838 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
2839 		__stop_cpu_timer_accounting(vcpu);
2840 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
2841 	vcpu->arch.enabled_gmap = gmap_get_enabled();
2842 	gmap_disable(vcpu->arch.enabled_gmap);
2844 }
2845 
2846 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
2847 {
2848 	/* this equals initial cpu reset in POP, but we don't switch to ESA */
2849 	vcpu->arch.sie_block->gpsw.mask = 0UL;
2850 	vcpu->arch.sie_block->gpsw.addr = 0UL;
2851 	kvm_s390_set_prefix(vcpu, 0);
2852 	kvm_s390_set_cpu_timer(vcpu, 0);
2853 	vcpu->arch.sie_block->ckc       = 0UL;
2854 	vcpu->arch.sie_block->todpr     = 0;
2855 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
2856 	vcpu->arch.sie_block->gcr[0]  = CR0_UNUSED_56 |
2857 					CR0_INTERRUPT_KEY_SUBMASK |
2858 					CR0_MEASUREMENT_ALERT_SUBMASK;
2859 	vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 |
2860 					CR14_UNUSED_33 |
2861 					CR14_EXTERNAL_DAMAGE_SUBMASK;
2862 	/* make sure the new fpc will be lazily loaded */
2863 	save_fpu_regs();
2864 	current->thread.fpu.fpc = 0;
2865 	vcpu->arch.sie_block->gbea = 1;
2866 	vcpu->arch.sie_block->pp = 0;
2867 	vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
2868 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2869 	kvm_clear_async_pf_completion_queue(vcpu);
2870 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
2871 		kvm_s390_vcpu_stop(vcpu);
2872 	kvm_s390_clear_local_irqs(vcpu);
2873 }
2874 
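/*
 * Finish VCPU setup after creation: inherit the VM-wide TOD epoch, hook
 * the VCPU into the SCA and intercept operation exceptions when STHYI is
 * emulated or userspace handles instruction 0x0000.
 */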
2875 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
2876 {
2877 	mutex_lock(&vcpu->kvm->lock);
2878 	preempt_disable();
2879 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
2880 	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
2881 	preempt_enable();
2882 	mutex_unlock(&vcpu->kvm->lock);
2883 	if (!kvm_is_ucontrol(vcpu->kvm)) {
2884 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
2885 		sca_add_vcpu(vcpu);
2886 	}
2887 	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
2888 		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
2889 	/* make vcpu_load load the right gmap on the first trigger */
2890 	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
2891 }
2892 
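/*
 * Is the given PCKMO subfunction offered to the guest and available on
 * the host?
 */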
2893 static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
2894 {
2895 	if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
2896 	    test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
2897 		return true;
2898 	return false;
2899 }
2900 
2901 static bool kvm_has_pckmo_ecc(struct kvm *kvm)
2902 {
2903 	/* At least one ECC subfunction must be present */
2904 	return kvm_has_pckmo_subfunc(kvm, 32) ||
2905 	       kvm_has_pckmo_subfunc(kvm, 33) ||
2906 	       kvm_has_pckmo_subfunc(kvm, 34) ||
2907 	       kvm_has_pckmo_subfunc(kvm, 40) ||
2908 	       kvm_has_pckmo_subfunc(kvm, 41);
2909 
2910 }
2911 
2912 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
2913 {
2914 	/*
2915 	 * If the AP instructions are not being interpreted and the MSAX3
2916 	 * facility is not configured for the guest, there is nothing to set up.
2917 	 */
2918 	if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
2919 		return;
2920 
2921 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
2922 	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
2923 	vcpu->arch.sie_block->eca &= ~ECA_APIE;
2924 	vcpu->arch.sie_block->ecd &= ~ECD_ECC;
2925 
2926 	if (vcpu->kvm->arch.crypto.apie)
2927 		vcpu->arch.sie_block->eca |= ECA_APIE;
2928 
2929 	/* Set up protected key support */
2930 	if (vcpu->kvm->arch.crypto.aes_kw) {
2931 		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
2932 		/* ECC keys are also wrapped with the AES key */
2933 		if (kvm_has_pckmo_ecc(vcpu->kvm))
2934 			vcpu->arch.sie_block->ecd |= ECD_ECC;
2935 	}
2936 
2937 	if (vcpu->kvm->arch.crypto.dea_kw)
2938 		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
2939 }
2940 
2941 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
2942 {
2943 	free_page(vcpu->arch.sie_block->cbrlo);
2944 	vcpu->arch.sie_block->cbrlo = 0;
2945 }
2946 
2947 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
2948 {
2949 	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
2950 	if (!vcpu->arch.sie_block->cbrlo)
2951 		return -ENOMEM;
2952 	return 0;
2953 }
2954 
2955 static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
2956 {
2957 	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
2958 
2959 	vcpu->arch.sie_block->ibc = model->ibc;
2960 	if (test_kvm_facility(vcpu->kvm, 7))
2961 		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
2962 }
2963 
2964 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
2965 {
2966 	int rc = 0;
2967 
2968 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
2969 						    CPUSTAT_SM |
2970 						    CPUSTAT_STOPPED);
2971 
2972 	if (test_kvm_facility(vcpu->kvm, 78))
2973 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
2974 	else if (test_kvm_facility(vcpu->kvm, 8))
2975 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
2976 
2977 	kvm_s390_vcpu_setup_model(vcpu);
2978 
2979 	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
2980 	if (MACHINE_HAS_ESOP)
2981 		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
2982 	if (test_kvm_facility(vcpu->kvm, 9))
2983 		vcpu->arch.sie_block->ecb |= ECB_SRSI;
2984 	if (test_kvm_facility(vcpu->kvm, 73))
2985 		vcpu->arch.sie_block->ecb |= ECB_TE;
2986 
2987 	if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
2988 		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
2989 	if (test_kvm_facility(vcpu->kvm, 130))
2990 		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
2991 	vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
2992 	if (sclp.has_cei)
2993 		vcpu->arch.sie_block->eca |= ECA_CEI;
2994 	if (sclp.has_ib)
2995 		vcpu->arch.sie_block->eca |= ECA_IB;
2996 	if (sclp.has_siif)
2997 		vcpu->arch.sie_block->eca |= ECA_SII;
2998 	if (sclp.has_sigpif)
2999 		vcpu->arch.sie_block->eca |= ECA_SIGPI;
3000 	if (test_kvm_facility(vcpu->kvm, 129)) {
3001 		vcpu->arch.sie_block->eca |= ECA_VX;
3002 		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3003 	}
3004 	if (test_kvm_facility(vcpu->kvm, 139))
3005 		vcpu->arch.sie_block->ecd |= ECD_MEF;
3006 	if (test_kvm_facility(vcpu->kvm, 156))
3007 		vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
3008 	if (vcpu->arch.sie_block->gd) {
3009 		vcpu->arch.sie_block->eca |= ECA_AIV;
3010 		VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3011 			   vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3012 	}
3013 	vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
3014 					| SDNXC;
3015 	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
3016 
3017 	if (sclp.has_kss)
3018 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
3019 	else
3020 		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
3021 
3022 	if (vcpu->kvm->arch.use_cmma) {
3023 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
3024 		if (rc)
3025 			return rc;
3026 	}
3027 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3028 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
3029 
3030 	vcpu->arch.sie_block->hpid = HPID_KVM;
3031 
3032 	kvm_s390_vcpu_crypto_setup(vcpu);
3033 
3034 	return rc;
3035 }
3036 
3037 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
3038 				      unsigned int id)
3039 {
3040 	struct kvm_vcpu *vcpu;
3041 	struct sie_page *sie_page;
3042 	int rc = -EINVAL;
3043 
3044 	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
3045 		goto out;
3046 
3047 	rc = -ENOMEM;
3048 
3049 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
3050 	if (!vcpu)
3051 		goto out;
3052 
3053 	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
3054 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
3055 	if (!sie_page)
3056 		goto out_free_cpu;
3057 
3058 	vcpu->arch.sie_block = &sie_page->sie_block;
3059 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
3060 
3061 	/* the real guest size will always be smaller than msl */
3062 	vcpu->arch.sie_block->mso = 0;
3063 	vcpu->arch.sie_block->msl = sclp.hamax;
3064 
3065 	vcpu->arch.sie_block->icpua = id;
3066 	spin_lock_init(&vcpu->arch.local_int.lock);
3067 	vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa_int.origin;
3068 	if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
3069 		vcpu->arch.sie_block->gd |= GISA_FORMAT1;
3070 	seqcount_init(&vcpu->arch.cputm_seqcount);
3071 
3072 	rc = kvm_vcpu_init(vcpu, kvm, id);
3073 	if (rc)
3074 		goto out_free_sie_block;
3075 	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
3076 		 vcpu->arch.sie_block);
3077 	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
3078 
3079 	return vcpu;
3080 out_free_sie_block:
3081 	free_page((unsigned long)(vcpu->arch.sie_block));
3082 out_free_cpu:
3083 	kmem_cache_free(kvm_vcpu_cache, vcpu);
3084 out:
3085 	return ERR_PTR(rc);
3086 }
3087 
3088 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3089 {
3090 	return kvm_s390_vcpu_has_irq(vcpu, 0);
3091 }
3092 
3093 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3094 {
3095 	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
3096 }
3097 
3098 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
3099 {
3100 	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
3101 	exit_sie(vcpu);
3102 }
3103 
3104 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
3105 {
3106 	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
3107 }
3108 
3109 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
3110 {
3111 	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
3112 	exit_sie(vcpu);
3113 }
3114 
3115 bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
3116 {
3117 	return atomic_read(&vcpu->arch.sie_block->prog20) &
3118 	       (PROG_BLOCK_SIE | PROG_REQUEST);
3119 }
3120 
3121 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
3122 {
3123 	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
3124 }
3125 
3126 /*
3127  * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
3128  * If the CPU is not running (e.g. waiting as idle) the function will
3129  * return immediately. */
3130 void exit_sie(struct kvm_vcpu *vcpu)
3131 {
3132 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
3133 	kvm_s390_vsie_kick(vcpu);
3134 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
3135 		cpu_relax();
3136 }
3137 
3138 /* Kick a guest cpu out of SIE to process a request synchronously */
3139 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
3140 {
3141 	kvm_make_request(req, vcpu);
3142 	kvm_s390_vcpu_request(vcpu);
3143 }
3144 
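/*
 * gmap invalidation notifier: if the invalidated range overlaps a VCPU's
 * prefix pages, request an MMU reload to re-arm the ipte notifier.
 */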
3145 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
3146 			      unsigned long end)
3147 {
3148 	struct kvm *kvm = gmap->private;
3149 	struct kvm_vcpu *vcpu;
3150 	unsigned long prefix;
3151 	int i;
3152 
3153 	if (gmap_is_shadow(gmap))
3154 		return;
3155 	if (start >= 1UL << 31)
3156 		/* We are only interested in prefix pages */
3157 		return;
3158 	kvm_for_each_vcpu(i, vcpu, kvm) {
3159 		/* match against both prefix pages */
3160 		prefix = kvm_s390_get_prefix(vcpu);
3161 		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
3162 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
3163 				   start, end);
3164 			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
3165 		}
3166 	}
3167 }
3168 
3169 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
3170 {
3171 	/* do not poll with more than halt_poll_max_steal percent of steal time */
3172 	if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
3173 	    halt_poll_max_steal) {
3174 		vcpu->stat.halt_no_poll_steal++;
3175 		return true;
3176 	}
3177 	return false;
3178 }
3179 
3180 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
3181 {
3182 	/* kvm common code refers to this, but never calls it */
3183 	BUG();
3184 	return 0;
3185 }
3186 
3187 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
3188 					   struct kvm_one_reg *reg)
3189 {
3190 	int r = -EINVAL;
3191 
3192 	switch (reg->id) {
3193 	case KVM_REG_S390_TODPR:
3194 		r = put_user(vcpu->arch.sie_block->todpr,
3195 			     (u32 __user *)reg->addr);
3196 		break;
3197 	case KVM_REG_S390_EPOCHDIFF:
3198 		r = put_user(vcpu->arch.sie_block->epoch,
3199 			     (u64 __user *)reg->addr);
3200 		break;
3201 	case KVM_REG_S390_CPU_TIMER:
3202 		r = put_user(kvm_s390_get_cpu_timer(vcpu),
3203 			     (u64 __user *)reg->addr);
3204 		break;
3205 	case KVM_REG_S390_CLOCK_COMP:
3206 		r = put_user(vcpu->arch.sie_block->ckc,
3207 			     (u64 __user *)reg->addr);
3208 		break;
3209 	case KVM_REG_S390_PFTOKEN:
3210 		r = put_user(vcpu->arch.pfault_token,
3211 			     (u64 __user *)reg->addr);
3212 		break;
3213 	case KVM_REG_S390_PFCOMPARE:
3214 		r = put_user(vcpu->arch.pfault_compare,
3215 			     (u64 __user *)reg->addr);
3216 		break;
3217 	case KVM_REG_S390_PFSELECT:
3218 		r = put_user(vcpu->arch.pfault_select,
3219 			     (u64 __user *)reg->addr);
3220 		break;
3221 	case KVM_REG_S390_PP:
3222 		r = put_user(vcpu->arch.sie_block->pp,
3223 			     (u64 __user *)reg->addr);
3224 		break;
3225 	case KVM_REG_S390_GBEA:
3226 		r = put_user(vcpu->arch.sie_block->gbea,
3227 			     (u64 __user *)reg->addr);
3228 		break;
3229 	default:
3230 		break;
3231 	}
3232 
3233 	return r;
3234 }
3235 
3236 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
3237 					   struct kvm_one_reg *reg)
3238 {
3239 	int r = -EINVAL;
3240 	__u64 val;
3241 
3242 	switch (reg->id) {
3243 	case KVM_REG_S390_TODPR:
3244 		r = get_user(vcpu->arch.sie_block->todpr,
3245 			     (u32 __user *)reg->addr);
3246 		break;
3247 	case KVM_REG_S390_EPOCHDIFF:
3248 		r = get_user(vcpu->arch.sie_block->epoch,
3249 			     (u64 __user *)reg->addr);
3250 		break;
3251 	case KVM_REG_S390_CPU_TIMER:
3252 		r = get_user(val, (u64 __user *)reg->addr);
3253 		if (!r)
3254 			kvm_s390_set_cpu_timer(vcpu, val);
3255 		break;
3256 	case KVM_REG_S390_CLOCK_COMP:
3257 		r = get_user(vcpu->arch.sie_block->ckc,
3258 			     (u64 __user *)reg->addr);
3259 		break;
3260 	case KVM_REG_S390_PFTOKEN:
3261 		r = get_user(vcpu->arch.pfault_token,
3262 			     (u64 __user *)reg->addr);
3263 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3264 			kvm_clear_async_pf_completion_queue(vcpu);
3265 		break;
3266 	case KVM_REG_S390_PFCOMPARE:
3267 		r = get_user(vcpu->arch.pfault_compare,
3268 			     (u64 __user *)reg->addr);
3269 		break;
3270 	case KVM_REG_S390_PFSELECT:
3271 		r = get_user(vcpu->arch.pfault_select,
3272 			     (u64 __user *)reg->addr);
3273 		break;
3274 	case KVM_REG_S390_PP:
3275 		r = get_user(vcpu->arch.sie_block->pp,
3276 			     (u64 __user *)reg->addr);
3277 		break;
3278 	case KVM_REG_S390_GBEA:
3279 		r = get_user(vcpu->arch.sie_block->gbea,
3280 			     (u64 __user *)reg->addr);
3281 		break;
3282 	default:
3283 		break;
3284 	}
3285 
3286 	return r;
3287 }
3288 
3289 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
3290 {
3291 	kvm_s390_vcpu_initial_reset(vcpu);
3292 	return 0;
3293 }
3294 
3295 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3296 {
3297 	vcpu_load(vcpu);
3298 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
3299 	vcpu_put(vcpu);
3300 	return 0;
3301 }
3302 
3303 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3304 {
3305 	vcpu_load(vcpu);
3306 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
3307 	vcpu_put(vcpu);
3308 	return 0;
3309 }
3310 
3311 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
3312 				  struct kvm_sregs *sregs)
3313 {
3314 	vcpu_load(vcpu);
3315 
3316 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
3317 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
3318 
3319 	vcpu_put(vcpu);
3320 	return 0;
3321 }
3322 
3323 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3324 				  struct kvm_sregs *sregs)
3325 {
3326 	vcpu_load(vcpu);
3327 
3328 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
3329 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
3330 
3331 	vcpu_put(vcpu);
3332 	return 0;
3333 }
3334 
3335 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3336 {
3337 	int ret = 0;
3338 
3339 	vcpu_load(vcpu);
3340 
3341 	if (test_fp_ctl(fpu->fpc)) {
3342 		ret = -EINVAL;
3343 		goto out;
3344 	}
3345 	vcpu->run->s.regs.fpc = fpu->fpc;
3346 	if (MACHINE_HAS_VX)
3347 		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
3348 				 (freg_t *) fpu->fprs);
3349 	else
3350 		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
3351 
3352 out:
3353 	vcpu_put(vcpu);
3354 	return ret;
3355 }
3356 
3357 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3358 {
3359 	vcpu_load(vcpu);
3360 
3361 	/* make sure we have the latest values */
3362 	save_fpu_regs();
3363 	if (MACHINE_HAS_VX)
3364 		convert_vx_to_fp((freg_t *) fpu->fprs,
3365 				 (__vector128 *) vcpu->run->s.regs.vrs);
3366 	else
3367 		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
3368 	fpu->fpc = vcpu->run->s.regs.fpc;
3369 
3370 	vcpu_put(vcpu);
3371 	return 0;
3372 }
3373 
3374 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
3375 {
3376 	int rc = 0;
3377 
3378 	if (!is_vcpu_stopped(vcpu))
3379 		rc = -EBUSY;
3380 	else {
3381 		vcpu->run->psw_mask = psw.mask;
3382 		vcpu->run->psw_addr = psw.addr;
3383 	}
3384 	return rc;
3385 }
3386 
3387 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
3388 				  struct kvm_translation *tr)
3389 {
3390 	return -EINVAL; /* not implemented yet */
3391 }
3392 
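/*
 * Guest debugging is driven by the PER facility: enabling it forces
 * CPUSTAT_P so that PER events intercept, and hardware breakpoints are
 * imported into the PER configuration below.
 */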
3393 #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
3394 			      KVM_GUESTDBG_USE_HW_BP | \
3395 			      KVM_GUESTDBG_ENABLE)
3396 
3397 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
3398 					struct kvm_guest_debug *dbg)
3399 {
3400 	int rc = 0;
3401 
3402 	vcpu_load(vcpu);
3403 
3404 	vcpu->guest_debug = 0;
3405 	kvm_s390_clear_bp_data(vcpu);
3406 
3407 	if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
3408 		rc = -EINVAL;
3409 		goto out;
3410 	}
3411 	if (!sclp.has_gpere) {
3412 		rc = -EINVAL;
3413 		goto out;
3414 	}
3415 
3416 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
3417 		vcpu->guest_debug = dbg->control;
3418 		/* enforce guest PER */
3419 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
3420 
3421 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
3422 			rc = kvm_s390_import_bp_data(vcpu, dbg);
3423 	} else {
3424 		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
3425 		vcpu->arch.guestdbg.last_bp = 0;
3426 	}
3427 
3428 	if (rc) {
3429 		vcpu->guest_debug = 0;
3430 		kvm_s390_clear_bp_data(vcpu);
3431 		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
3432 	}
3433 
3434 out:
3435 	vcpu_put(vcpu);
3436 	return rc;
3437 }
3438 
3439 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3440 				    struct kvm_mp_state *mp_state)
3441 {
3442 	int ret;
3443 
3444 	vcpu_load(vcpu);
3445 
3446 	/* CHECK_STOP and LOAD are not supported yet */
3447 	ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
3448 				      KVM_MP_STATE_OPERATING;
3449 
3450 	vcpu_put(vcpu);
3451 	return ret;
3452 }
3453 
3454 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3455 				    struct kvm_mp_state *mp_state)
3456 {
3457 	int rc = 0;
3458 
3459 	vcpu_load(vcpu);
3460 
3461 	/* user space knows about this interface - let it control the state */
3462 	vcpu->kvm->arch.user_cpu_state_ctrl = 1;
3463 
3464 	switch (mp_state->mp_state) {
3465 	case KVM_MP_STATE_STOPPED:
3466 		kvm_s390_vcpu_stop(vcpu);
3467 		break;
3468 	case KVM_MP_STATE_OPERATING:
3469 		kvm_s390_vcpu_start(vcpu);
3470 		break;
3471 	case KVM_MP_STATE_LOAD:
3472 	case KVM_MP_STATE_CHECK_STOP:
3473 		/* fall through - CHECK_STOP and LOAD are not supported yet */
3474 	default:
3475 		rc = -ENXIO;
3476 	}
3477 
3478 	vcpu_put(vcpu);
3479 	return rc;
3480 }
3481 
3482 static bool ibs_enabled(struct kvm_vcpu *vcpu)
3483 {
3484 	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
3485 }
3486 
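/*
 * Process all requests pending for this vcpu before entering SIE again.
 * Most requests just rearm some piece of state and then retry the loop
 * until no request is left.
 */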
3487 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
3488 {
3489 retry:
3490 	kvm_s390_vcpu_request_handled(vcpu);
3491 	if (!kvm_request_pending(vcpu))
3492 		return 0;
3493 	/*
3494 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
3495 	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
3496 	 * This ensures that the ipte instruction for this request has
3497 	 * already finished. We might race against a second unmapper that
3498 	 * wants to set the blocking bit. Let's just retry the request loop.
3499 	 */
3500 	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
3501 		int rc;
3502 		rc = gmap_mprotect_notify(vcpu->arch.gmap,
3503 					  kvm_s390_get_prefix(vcpu),
3504 					  PAGE_SIZE * 2, PROT_WRITE);
3505 		if (rc) {
3506 			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
3507 			return rc;
3508 		}
3509 		goto retry;
3510 	}
3511 
3512 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
3513 		vcpu->arch.sie_block->ihcpu = 0xffff;
3514 		goto retry;
3515 	}
3516 
3517 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
3518 		if (!ibs_enabled(vcpu)) {
3519 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
3520 			kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
3521 		}
3522 		goto retry;
3523 	}
3524 
3525 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
3526 		if (ibs_enabled(vcpu)) {
3527 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
3528 			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
3529 		}
3530 		goto retry;
3531 	}
3532 
3533 	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
3534 		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3535 		goto retry;
3536 	}
3537 
3538 	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
3539 		/*
3540 		 * Disable CMM virtualization; we will emulate the ESSA
3541 		 * instruction manually, in order to provide additional
3542 		 * functionalities needed for live migration.
3543 		 */
3544 		vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
3545 		goto retry;
3546 	}
3547 
3548 	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
3549 		/*
3550 		 * Re-enable CMM virtualization if CMMA is available and
3551 		 * CMM has been used.
3552 		 */
3553 		if ((vcpu->kvm->arch.use_cmma) &&
3554 		    (vcpu->kvm->mm->context.uses_cmm))
3555 			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
3556 		goto retry;
3557 	}
3558 
3559 	/* nothing to do, just clear the request */
3560 	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
3561 	/* we left the vsie handler, nothing to do, just clear the request */
3562 	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
3563 
3564 	return 0;
3565 }
3566 
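/*
 * Update the guest TOD epoch (and, with the multiple-epoch facility,
 * the epoch index) on all vcpus. All vcpus are blocked while the values
 * are changed so that none of them can observe an inconsistent TOD.
 */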
3567 void kvm_s390_set_tod_clock(struct kvm *kvm,
3568 			    const struct kvm_s390_vm_tod_clock *gtod)
3569 {
3570 	struct kvm_vcpu *vcpu;
3571 	struct kvm_s390_tod_clock_ext htod;
3572 	int i;
3573 
3574 	mutex_lock(&kvm->lock);
3575 	preempt_disable();
3576 
3577 	get_tod_clock_ext((char *)&htod);
3578 
3579 	kvm->arch.epoch = gtod->tod - htod.tod;
3580 	kvm->arch.epdx = 0;
3581 	if (test_kvm_facility(kvm, 139)) {
3582 		kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
3583 		if (kvm->arch.epoch > gtod->tod)
3584 			kvm->arch.epdx -= 1;
3585 	}
3586 
3587 	kvm_s390_vcpu_block_all(kvm);
3588 	kvm_for_each_vcpu(i, vcpu, kvm) {
3589 		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
3590 		vcpu->arch.sie_block->epdx  = kvm->arch.epdx;
3591 	}
3592 
3593 	kvm_s390_vcpu_unblock_all(kvm);
3594 	preempt_enable();
3595 	mutex_unlock(&kvm->lock);
3596 }
3597 
3598 /**
3599  * kvm_arch_fault_in_page - fault-in guest page if necessary
3600  * @vcpu: The corresponding virtual cpu
3601  * @gpa: Guest physical address
3602  * @writable: Whether the page should be writable or not
3603  *
3604  * Make sure that a guest page has been faulted-in on the host.
3605  *
3606  * Return: Zero on success, negative error code otherwise.
3607  */
3608 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
3609 {
3610 	return gmap_fault(vcpu->arch.gmap, gpa,
3611 			  writable ? FAULT_FLAG_WRITE : 0);
3612 }
3613 
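/*
 * Inject a pfault token into the guest: INIT tokens are delivered as a
 * local interrupt on the faulting vcpu, DONE tokens as a floating
 * interrupt on the VM.
 */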
3614 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
3615 				      unsigned long token)
3616 {
3617 	struct kvm_s390_interrupt inti;
3618 	struct kvm_s390_irq irq;
3619 
3620 	if (start_token) {
3621 		irq.u.ext.ext_params2 = token;
3622 		irq.type = KVM_S390_INT_PFAULT_INIT;
3623 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
3624 	} else {
3625 		inti.type = KVM_S390_INT_PFAULT_DONE;
3626 		inti.parm64 = token;
3627 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
3628 	}
3629 }
3630 
3631 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
3632 				     struct kvm_async_pf *work)
3633 {
3634 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
3635 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
3636 }
3637 
3638 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
3639 				 struct kvm_async_pf *work)
3640 {
3641 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
3642 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
3643 }
3644 
3645 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
3646 			       struct kvm_async_pf *work)
3647 {
3648 	/* s390 will always inject the page directly */
3649 }
3650 
3651 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
3652 {
3653 	/*
3654 	 * s390 will always inject the page directly,
3655 	 * but we still want check_async_completion to clean up
3656 	 */
3657 	return true;
3658 }
3659 
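/*
 * Only set up an async pfault if the guest opted in (valid token, PSW
 * matching the select/compare values), can actually take the external
 * interrupt right now, and has pfault handling enabled for its gmap.
 */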
3660 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
3661 {
3662 	hva_t hva;
3663 	struct kvm_arch_async_pf arch;
3664 	int rc;
3665 
3666 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3667 		return 0;
3668 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
3669 	    vcpu->arch.pfault_compare)
3670 		return 0;
3671 	if (psw_extint_disabled(vcpu))
3672 		return 0;
3673 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
3674 		return 0;
3675 	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
3676 		return 0;
3677 	if (!vcpu->arch.gmap->pfault_enabled)
3678 		return 0;
3679 
3680 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
3681 	hva += current->thread.gmap_addr & ~PAGE_MASK;
3682 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
3683 		return 0;
3684 
3685 	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
3686 	return rc;
3687 }
3688 
3689 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
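/*
 * Everything that has to happen before (re-)entering SIE: async pfault
 * housekeeping, interrupt delivery, request processing and guest debug
 * setup.
 */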
3690 {
3691 	int rc, cpuflags;
3692 
3693 	/*
3694 	 * On s390, notifications for arriving pages are delivered directly
3695 	 * to the guest, but the housekeeping for completed pfaults is
3696 	 * handled outside the worker.
3697 	 */
3698 	kvm_check_async_pf_completion(vcpu);
3699 
3700 	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
3701 	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
3702 
3703 	if (need_resched())
3704 		schedule();
3705 
3706 	if (test_cpu_flag(CIF_MCCK_PENDING))
3707 		s390_handle_mcck();
3708 
3709 	if (!kvm_is_ucontrol(vcpu->kvm)) {
3710 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
3711 		if (rc)
3712 			return rc;
3713 	}
3714 
3715 	rc = kvm_s390_handle_requests(vcpu);
3716 	if (rc)
3717 		return rc;
3718 
3719 	if (guestdbg_enabled(vcpu)) {
3720 		kvm_s390_backup_guest_per_regs(vcpu);
3721 		kvm_s390_patch_guest_per_regs(vcpu);
3722 	}
3723 
3724 	clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
3725 
3726 	vcpu->arch.sie_block->icptcode = 0;
3727 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
3728 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
3729 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
3730 
3731 	return 0;
3732 }
3733 
3734 static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
3735 {
3736 	struct kvm_s390_pgm_info pgm_info = {
3737 		.code = PGM_ADDRESSING,
3738 	};
3739 	u8 opcode, ilen;
3740 	int rc;
3741 
3742 	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
3743 	trace_kvm_s390_sie_fault(vcpu);
3744 
3745 	/*
3746 	 * We want to inject an addressing exception, which is defined as a
3747 	 * suppressing or terminating exception. However, since we came here
3748 	 * by a DAT access exception, the PSW still points to the faulting
3749 	 * instruction since DAT exceptions are nullifying. So we've got
3750 	 * to look up the current opcode to get the length of the instruction
3751 	 * to be able to forward the PSW.
3752 	 */
3753 	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
3754 	ilen = insn_length(opcode);
3755 	if (rc < 0) {
3756 		return rc;
3757 	} else if (rc) {
3758 		/* Instruction-Fetching Exceptions - we can't detect the ilen.
3759 		 * Forward by arbitrary ilc, injection will take care of
3760 		 * nullification if necessary.
3761 		 */
3762 		pgm_info = vcpu->arch.pgm;
3763 		ilen = 4;
3764 	}
3765 	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
3766 	kvm_s390_forward_psw(vcpu, ilen);
3767 	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
3768 }
3769 
3770 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
3771 {
3772 	struct mcck_volatile_info *mcck_info;
3773 	struct sie_page *sie_page;
3774 
3775 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
3776 		   vcpu->arch.sie_block->icptcode);
3777 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
3778 
3779 	if (guestdbg_enabled(vcpu))
3780 		kvm_s390_restore_guest_per_regs(vcpu);
3781 
3782 	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
3783 	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
3784 
3785 	if (exit_reason == -EINTR) {
3786 		VCPU_EVENT(vcpu, 3, "%s", "machine check");
3787 		sie_page = container_of(vcpu->arch.sie_block,
3788 					struct sie_page, sie_block);
3789 		mcck_info = &sie_page->mcck_info;
3790 		kvm_s390_reinject_machine_check(vcpu, mcck_info);
3791 		return 0;
3792 	}
3793 
3794 	if (vcpu->arch.sie_block->icptcode > 0) {
3795 		int rc = kvm_handle_sie_intercept(vcpu);
3796 
3797 		if (rc != -EOPNOTSUPP)
3798 			return rc;
3799 		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
3800 		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
3801 		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
3802 		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
3803 		return -EREMOTE;
3804 	} else if (exit_reason != -EFAULT) {
3805 		vcpu->stat.exit_null++;
3806 		return 0;
3807 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
3808 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
3809 		vcpu->run->s390_ucontrol.trans_exc_code =
3810 						current->thread.gmap_addr;
3811 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
3812 		return -EREMOTE;
3813 	} else if (current->thread.gmap_pfault) {
3814 		trace_kvm_s390_major_guest_pfault(vcpu);
3815 		current->thread.gmap_pfault = 0;
3816 		if (kvm_arch_setup_async_pf(vcpu))
3817 			return 0;
3818 		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
3819 	}
3820 	return vcpu_post_run_fault_in_sie(vcpu);
3821 }
3822 
3823 static int __vcpu_run(struct kvm_vcpu *vcpu)
3824 {
3825 	int rc, exit_reason;
3826 
3827 	/*
3828 	 * We try to hold kvm->srcu during most of vcpu_run (except when
3829 	 * running the guest), so that memslots (and other stuff) are protected
3830 	 */
3831 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3832 
3833 	do {
3834 		rc = vcpu_pre_run(vcpu);
3835 		if (rc)
3836 			break;
3837 
3838 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
3839 		/*
3840 		 * As PF_VCPU will be used in the fault handler, there
3841 		 * must be no uaccess between guest_enter and guest_exit.
3842 		 */
3843 		local_irq_disable();
3844 		guest_enter_irqoff();
3845 		__disable_cpu_timer_accounting(vcpu);
3846 		local_irq_enable();
3847 		exit_reason = sie64a(vcpu->arch.sie_block,
3848 				     vcpu->run->s.regs.gprs);
3849 		local_irq_disable();
3850 		__enable_cpu_timer_accounting(vcpu);
3851 		guest_exit_irqoff();
3852 		local_irq_enable();
3853 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3854 
3855 		rc = vcpu_post_run(vcpu, exit_reason);
3856 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
3857 
3858 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
3859 	return rc;
3860 }
3861 
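/*
 * Transfer register state that user space marked dirty in kvm_run into
 * the SIE block, lazily enabling facilities (RI, GS) when valid state
 * shows up, and switch the host fpu/access/guarded-storage context over
 * to the guest copies.
 */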
3862 static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3863 {
3864 	struct runtime_instr_cb *riccb;
3865 	struct gs_cb *gscb;
3866 
3867 	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
3868 	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
3869 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
3870 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
3871 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
3872 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
3873 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
3874 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
3875 		/* some control register changes require a tlb flush */
3876 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
3877 	}
3878 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
3879 		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
3880 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
3881 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
3882 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
3883 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
3884 	}
3885 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
3886 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
3887 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
3888 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
3889 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3890 			kvm_clear_async_pf_completion_queue(vcpu);
3891 	}
3892 	/*
3893 	 * If userspace sets the riccb (e.g. after migration) to a valid state,
3894 	 * we should enable RI here instead of doing the lazy enablement.
3895 	 */
3896 	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
3897 	    test_kvm_facility(vcpu->kvm, 64) &&
3898 	    riccb->v &&
3899 	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
3900 		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
3901 		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
3902 	}
3903 	/*
3904 	 * If userspace sets the gscb (e.g. after migration) to non-zero,
3905 	 * we should enable GS here instead of doing the lazy enablement.
3906 	 */
3907 	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
3908 	    test_kvm_facility(vcpu->kvm, 133) &&
3909 	    gscb->gssm &&
3910 	    !vcpu->arch.gs_enabled) {
3911 		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
3912 		vcpu->arch.sie_block->ecb |= ECB_GS;
3913 		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3914 		vcpu->arch.gs_enabled = 1;
3915 	}
3916 	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
3917 	    test_kvm_facility(vcpu->kvm, 82)) {
3918 		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
3919 		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
3920 	}
3921 	save_access_regs(vcpu->arch.host_acrs);
3922 	restore_access_regs(vcpu->run->s.regs.acrs);
3923 	/* save host (userspace) fprs/vrs */
3924 	save_fpu_regs();
3925 	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
3926 	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
3927 	if (MACHINE_HAS_VX)
3928 		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
3929 	else
3930 		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
3931 	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
3932 	if (test_fp_ctl(current->thread.fpu.fpc))
3933 		/* User space provided an invalid FPC, let's clear it */
3934 		current->thread.fpu.fpc = 0;
3935 	if (MACHINE_HAS_GS) {
3936 		preempt_disable();
3937 		__ctl_set_bit(2, 4);
3938 		if (current->thread.gs_cb) {
3939 			vcpu->arch.host_gscb = current->thread.gs_cb;
3940 			save_gs_cb(vcpu->arch.host_gscb);
3941 		}
3942 		if (vcpu->arch.gs_enabled) {
3943 			current->thread.gs_cb = (struct gs_cb *)
3944 						&vcpu->run->s.regs.gscb;
3945 			restore_gs_cb(current->thread.gs_cb);
3946 		}
3947 		preempt_enable();
3948 	}
3949 	/* SIE will load etoken directly from SDNX and therefore kvm_run */
3950 
3951 	kvm_run->kvm_dirty_regs = 0;
3952 }
3953 
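/*
 * Mirror image of sync_regs: copy the guest register state back into
 * kvm_run for user space and restore the host context.
 */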
3954 static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3955 {
3956 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
3957 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
3958 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
3959 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
3960 	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
3961 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
3962 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
3963 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
3964 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
3965 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
3966 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
3967 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
3968 	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
3969 	save_access_regs(vcpu->run->s.regs.acrs);
3970 	restore_access_regs(vcpu->arch.host_acrs);
3971 	/* Save guest register state */
3972 	save_fpu_regs();
3973 	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
3974 	/* Restore will be done lazily at return */
3975 	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
3976 	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
3977 	if (MACHINE_HAS_GS) {
3978 		__ctl_set_bit(2, 4);
3979 		if (vcpu->arch.gs_enabled)
3980 			save_gs_cb(current->thread.gs_cb);
3981 		preempt_disable();
3982 		current->thread.gs_cb = vcpu->arch.host_gscb;
3983 		restore_gs_cb(vcpu->arch.host_gscb);
3984 		preempt_enable();
3985 		if (!vcpu->arch.host_gscb)
3986 			__ctl_clear_bit(2, 4);
3987 		vcpu->arch.host_gscb = NULL;
3988 	}
3989 	/* SIE will save etoken directly into SDNX and therefore kvm_run */
3990 }
3991 
3992 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3993 {
3994 	int rc;
3995 
3996 	if (kvm_run->immediate_exit)
3997 		return -EINTR;
3998 
3999 	if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
4000 	    kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
4001 		return -EINVAL;
4002 
4003 	vcpu_load(vcpu);
4004 
4005 	if (guestdbg_exit_pending(vcpu)) {
4006 		kvm_s390_prepare_debug_exit(vcpu);
4007 		rc = 0;
4008 		goto out;
4009 	}
4010 
4011 	kvm_sigset_activate(vcpu);
4012 
4013 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
4014 		kvm_s390_vcpu_start(vcpu);
4015 	} else if (is_vcpu_stopped(vcpu)) {
4016 		pr_err_ratelimited("can't run stopped vcpu %d\n",
4017 				   vcpu->vcpu_id);
4018 		rc = -EINVAL;
4019 		goto out;
4020 	}
4021 
4022 	sync_regs(vcpu, kvm_run);
4023 	enable_cpu_timer_accounting(vcpu);
4024 
4025 	might_fault();
4026 	rc = __vcpu_run(vcpu);
4027 
4028 	if (signal_pending(current) && !rc) {
4029 		kvm_run->exit_reason = KVM_EXIT_INTR;
4030 		rc = -EINTR;
4031 	}
4032 
4033 	if (guestdbg_exit_pending(vcpu) && !rc)  {
4034 		kvm_s390_prepare_debug_exit(vcpu);
4035 		rc = 0;
4036 	}
4037 
4038 	if (rc == -EREMOTE) {
4039 		/* userspace support is needed, kvm_run has been prepared */
4040 		rc = 0;
4041 	}
4042 
4043 	disable_cpu_timer_accounting(vcpu);
4044 	store_regs(vcpu, kvm_run);
4045 
4046 	kvm_sigset_deactivate(vcpu);
4047 
4048 	vcpu->stat.exit_userspace++;
4049 out:
4050 	vcpu_put(vcpu);
4051 	return rc;
4052 }
4053 
4054 /*
4055  * store status at address
4056  * we have two special cases:
4057  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
4058  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
4059  */
4060 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
4061 {
4062 	unsigned char archmode = 1;
4063 	freg_t fprs[NUM_FPRS];
4064 	unsigned int px;
4065 	u64 clkcomp, cputm;
4066 	int rc;
4067 
4068 	px = kvm_s390_get_prefix(vcpu);
4069 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
4070 		if (write_guest_abs(vcpu, 163, &archmode, 1))
4071 			return -EFAULT;
4072 		gpa = 0;
4073 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
4074 		if (write_guest_real(vcpu, 163, &archmode, 1))
4075 			return -EFAULT;
4076 		gpa = px;
4077 	} else
4078 		gpa -= __LC_FPREGS_SAVE_AREA;
4079 
4080 	/* manually convert vector registers if necessary */
4081 	if (MACHINE_HAS_VX) {
4082 		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
4083 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
4084 				     fprs, 128);
4085 	} else {
4086 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
4087 				     vcpu->run->s.regs.fprs, 128);
4088 	}
4089 	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
4090 			      vcpu->run->s.regs.gprs, 128);
4091 	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
4092 			      &vcpu->arch.sie_block->gpsw, 16);
4093 	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
4094 			      &px, 4);
4095 	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
4096 			      &vcpu->run->s.regs.fpc, 4);
4097 	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
4098 			      &vcpu->arch.sie_block->todpr, 4);
4099 	cputm = kvm_s390_get_cpu_timer(vcpu);
4100 	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
4101 			      &cputm, 8);
4102 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
4103 	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
4104 			      &clkcomp, 8);
4105 	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
4106 			      &vcpu->run->s.regs.acrs, 64);
4107 	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
4108 			      &vcpu->arch.sie_block->gcr, 128);
4109 	return rc ? -EFAULT : 0;
4110 }
4111 
4112 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
4113 {
4114 	/*
4115 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
4116 	 * switch in the run ioctl. Let's update our copies before we save
4117 	 * them into the save area.
4118 	 */
4119 	save_fpu_regs();
4120 	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
4121 	save_access_regs(vcpu->run->s.regs.acrs);
4122 
4123 	return kvm_s390_store_status_unloaded(vcpu, addr);
4124 }
4125 
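/*
 * IBS is only of use while exactly one VCPU is running, so the start
 * and stop paths below enable it for a sole remaining VCPU and disable
 * it on all VCPUs as soon as a second one becomes active.
 */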
4126 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
4127 {
4128 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
4129 	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
4130 }
4131 
4132 static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
4133 {
4134 	unsigned int i;
4135 	struct kvm_vcpu *vcpu;
4136 
4137 	kvm_for_each_vcpu(i, vcpu, kvm) {
4138 		__disable_ibs_on_vcpu(vcpu);
4139 	}
4140 }
4141 
4142 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
4143 {
4144 	if (!sclp.has_ibs)
4145 		return;
4146 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
4147 	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
4148 }
4149 
4150 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
4151 {
4152 	int i, online_vcpus, started_vcpus = 0;
4153 
4154 	if (!is_vcpu_stopped(vcpu))
4155 		return;
4156 
4157 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
4158 	/* Only one cpu at a time may enter/leave the STOPPED state. */
4159 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
4160 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
4161 
4162 	for (i = 0; i < online_vcpus; i++) {
4163 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
4164 			started_vcpus++;
4165 	}
4166 
4167 	if (started_vcpus == 0) {
4168 		/* we're the only active VCPU -> speed it up */
4169 		__enable_ibs_on_vcpu(vcpu);
4170 	} else if (started_vcpus == 1) {
4171 		/*
4172 		 * As we are starting a second VCPU, we have to disable
4173 		 * the IBS facility on all VCPUs to remove potentially
4174 		 * oustanding ENABLE requests.
4175 		 * outstanding ENABLE requests.
4176 		__disable_ibs_on_all_vcpus(vcpu->kvm);
4177 	}
4178 
4179 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
4180 	/*
4181 	 * Another VCPU might have used IBS while we were offline.
4182 	 * Let's play safe and flush the VCPU at startup.
4183 	 */
4184 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4185 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
4186 	return;
4187 }
4188 
4189 void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
4190 {
4191 	int i, online_vcpus, started_vcpus = 0;
4192 	struct kvm_vcpu *started_vcpu = NULL;
4193 
4194 	if (is_vcpu_stopped(vcpu))
4195 		return;
4196 
4197 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
4198 	/* Only one cpu at a time may enter/leave the STOPPED state. */
4199 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
4200 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
4201 
4202 	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
4203 	kvm_s390_clear_stop_irq(vcpu);
4204 
4205 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
4206 	__disable_ibs_on_vcpu(vcpu);
4207 
4208 	for (i = 0; i < online_vcpus; i++) {
4209 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
4210 			started_vcpus++;
4211 			started_vcpu = vcpu->kvm->vcpus[i];
4212 		}
4213 	}
4214 
4215 	if (started_vcpus == 1) {
4216 		/*
4217 		 * As we only have one VCPU left, we want to enable the
4218 		 * IBS facility for that VCPU to speed it up.
4219 		 */
4220 		__enable_ibs_on_vcpu(started_vcpu);
4221 	}
4222 
4223 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
4224 	return;
4225 }
4226 
4227 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
4228 				     struct kvm_enable_cap *cap)
4229 {
4230 	int r;
4231 
4232 	if (cap->flags)
4233 		return -EINVAL;
4234 
4235 	switch (cap->cap) {
4236 	case KVM_CAP_S390_CSS_SUPPORT:
4237 		if (!vcpu->kvm->arch.css_support) {
4238 			vcpu->kvm->arch.css_support = 1;
4239 			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
4240 			trace_kvm_s390_enable_css(vcpu->kvm);
4241 		}
4242 		r = 0;
4243 		break;
4244 	default:
4245 		r = -EINVAL;
4246 		break;
4247 	}
4248 	return r;
4249 }
4250 
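/*
 * Illustrative sketch only (user-space side, field names per
 * <linux/kvm.h>): a caller reading guest memory would set up the ioctl
 * argument roughly like this:
 *
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = guest_addr,
 *		.size  = length,
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)(unsigned long)buffer,
 *		.ar    = 0,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
 */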
4251 static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
4252 				  struct kvm_s390_mem_op *mop)
4253 {
4254 	void __user *uaddr = (void __user *)mop->buf;
4255 	void *tmpbuf = NULL;
4256 	int r, srcu_idx;
4257 	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
4258 				    | KVM_S390_MEMOP_F_CHECK_ONLY;
4259 
4260 	if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
4261 		return -EINVAL;
4262 
4263 	if (mop->size > MEM_OP_MAX_SIZE)
4264 		return -E2BIG;
4265 
4266 	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
4267 		tmpbuf = vmalloc(mop->size);
4268 		if (!tmpbuf)
4269 			return -ENOMEM;
4270 	}
4271 
4272 	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4273 
4274 	switch (mop->op) {
4275 	case KVM_S390_MEMOP_LOGICAL_READ:
4276 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
4277 			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
4278 					    mop->size, GACC_FETCH);
4279 			break;
4280 		}
4281 		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
4282 		if (r == 0) {
4283 			if (copy_to_user(uaddr, tmpbuf, mop->size))
4284 				r = -EFAULT;
4285 		}
4286 		break;
4287 	case KVM_S390_MEMOP_LOGICAL_WRITE:
4288 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
4289 			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
4290 					    mop->size, GACC_STORE);
4291 			break;
4292 		}
4293 		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
4294 			r = -EFAULT;
4295 			break;
4296 		}
4297 		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
4298 		break;
4299 	default:
4300 		r = -EINVAL;
4301 	}
4302 
4303 	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
4304 
4305 	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
4306 		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
4307 
4308 	vfree(tmpbuf);
4309 	return r;
4310 }
4311 
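/*
 * KVM_S390_IRQ and KVM_S390_INTERRUPT are handled in the async ioctl
 * path, i.e. before the vcpu mutex is taken, so interrupts can be
 * injected while the vcpu is running.
 */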
4312 long kvm_arch_vcpu_async_ioctl(struct file *filp,
4313 			       unsigned int ioctl, unsigned long arg)
4314 {
4315 	struct kvm_vcpu *vcpu = filp->private_data;
4316 	void __user *argp = (void __user *)arg;
4317 
4318 	switch (ioctl) {
4319 	case KVM_S390_IRQ: {
4320 		struct kvm_s390_irq s390irq;
4321 
4322 		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
4323 			return -EFAULT;
4324 		return kvm_s390_inject_vcpu(vcpu, &s390irq);
4325 	}
4326 	case KVM_S390_INTERRUPT: {
4327 		struct kvm_s390_interrupt s390int;
4328 		struct kvm_s390_irq s390irq = {};
4329 
4330 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
4331 			return -EFAULT;
4332 		if (s390int_to_s390irq(&s390int, &s390irq))
4333 			return -EINVAL;
4334 		return kvm_s390_inject_vcpu(vcpu, &s390irq);
4335 	}
4336 	}
4337 	return -ENOIOCTLCMD;
4338 }
4339 
4340 long kvm_arch_vcpu_ioctl(struct file *filp,
4341 			 unsigned int ioctl, unsigned long arg)
4342 {
4343 	struct kvm_vcpu *vcpu = filp->private_data;
4344 	void __user *argp = (void __user *)arg;
4345 	int idx;
4346 	long r;
4347 
4348 	vcpu_load(vcpu);
4349 
4350 	switch (ioctl) {
4351 	case KVM_S390_STORE_STATUS:
4352 		idx = srcu_read_lock(&vcpu->kvm->srcu);
4353 		r = kvm_s390_vcpu_store_status(vcpu, arg);
4354 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
4355 		break;
4356 	case KVM_S390_SET_INITIAL_PSW: {
4357 		psw_t psw;
4358 
4359 		r = -EFAULT;
4360 		if (copy_from_user(&psw, argp, sizeof(psw)))
4361 			break;
4362 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
4363 		break;
4364 	}
4365 	case KVM_S390_INITIAL_RESET:
4366 		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
4367 		break;
4368 	case KVM_SET_ONE_REG:
4369 	case KVM_GET_ONE_REG: {
4370 		struct kvm_one_reg reg;
4371 		r = -EFAULT;
4372 		if (copy_from_user(&reg, argp, sizeof(reg)))
4373 			break;
4374 		if (ioctl == KVM_SET_ONE_REG)
4375 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
4376 		else
4377 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
4378 		break;
4379 	}
4380 #ifdef CONFIG_KVM_S390_UCONTROL
4381 	case KVM_S390_UCAS_MAP: {
4382 		struct kvm_s390_ucas_mapping ucasmap;
4383 
4384 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
4385 			r = -EFAULT;
4386 			break;
4387 		}
4388 
4389 		if (!kvm_is_ucontrol(vcpu->kvm)) {
4390 			r = -EINVAL;
4391 			break;
4392 		}
4393 
4394 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
4395 				     ucasmap.vcpu_addr, ucasmap.length);
4396 		break;
4397 	}
4398 	case KVM_S390_UCAS_UNMAP: {
4399 		struct kvm_s390_ucas_mapping ucasmap;
4400 
4401 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
4402 			r = -EFAULT;
4403 			break;
4404 		}
4405 
4406 		if (!kvm_is_ucontrol(vcpu->kvm)) {
4407 			r = -EINVAL;
4408 			break;
4409 		}
4410 
4411 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
4412 			ucasmap.length);
4413 		break;
4414 	}
4415 #endif
4416 	case KVM_S390_VCPU_FAULT: {
4417 		r = gmap_fault(vcpu->arch.gmap, arg, 0);
4418 		break;
4419 	}
4420 	case KVM_ENABLE_CAP:
4421 	{
4422 		struct kvm_enable_cap cap;
4423 		r = -EFAULT;
4424 		if (copy_from_user(&cap, argp, sizeof(cap)))
4425 			break;
4426 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
4427 		break;
4428 	}
4429 	case KVM_S390_MEM_OP: {
4430 		struct kvm_s390_mem_op mem_op;
4431 
4432 		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
4433 			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
4434 		else
4435 			r = -EFAULT;
4436 		break;
4437 	}
4438 	case KVM_S390_SET_IRQ_STATE: {
4439 		struct kvm_s390_irq_state irq_state;
4440 
4441 		r = -EFAULT;
4442 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
4443 			break;
4444 		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
4445 		    irq_state.len == 0 ||
4446 		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
4447 			r = -EINVAL;
4448 			break;
4449 		}
4450 		/* do not use irq_state.flags, it will break old QEMUs */
4451 		r = kvm_s390_set_irq_state(vcpu,
4452 					   (void __user *) irq_state.buf,
4453 					   irq_state.len);
4454 		break;
4455 	}
4456 	case KVM_S390_GET_IRQ_STATE: {
4457 		struct kvm_s390_irq_state irq_state;
4458 
4459 		r = -EFAULT;
4460 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
4461 			break;
4462 		if (irq_state.len == 0) {
4463 			r = -EINVAL;
4464 			break;
4465 		}
4466 		/* do not use irq_state.flags, it will break old QEMUs */
4467 		r = kvm_s390_get_irq_state(vcpu,
4468 					   (__u8 __user *)  irq_state.buf,
4469 					   irq_state.len);
4470 		break;
4471 	}
4472 	default:
4473 		r = -ENOTTY;
4474 	}
4475 
4476 	vcpu_put(vcpu);
4477 	return r;
4478 }
4479 
4480 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
4481 {
4482 #ifdef CONFIG_KVM_S390_UCONTROL
4483 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
4484 		 && (kvm_is_ucontrol(vcpu->kvm))) {
4485 		vmf->page = virt_to_page(vcpu->arch.sie_block);
4486 		get_page(vmf->page);
4487 		return 0;
4488 	}
4489 #endif
4490 	return VM_FAULT_SIGBUS;
4491 }
4492 
4493 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
4494 			    unsigned long npages)
4495 {
4496 	return 0;
4497 }
4498 
4499 /* Section: memory related */
4500 int kvm_arch_prepare_memory_region(struct kvm *kvm,
4501 				   struct kvm_memory_slot *memslot,
4502 				   const struct kvm_userspace_memory_region *mem,
4503 				   enum kvm_mr_change change)
4504 {
4505 	/* A few sanity checks: memory slots must start and end on a segment
4506 	   boundary (1MB). The memory in userland may be fragmented into
4507 	   various different vmas. It is okay to mmap() and munmap() stuff
4508 	   in this slot after doing this call at any time. */
4509 
4510 	if (mem->userspace_addr & 0xffffful)
4511 		return -EINVAL;
4512 
4513 	if (mem->memory_size & 0xffffful)
4514 		return -EINVAL;
4515 
4516 	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
4517 		return -EINVAL;
4518 
4519 	return 0;
4520 }
4521 
4522 void kvm_arch_commit_memory_region(struct kvm *kvm,
4523 				const struct kvm_userspace_memory_region *mem,
4524 				const struct kvm_memory_slot *old,
4525 				const struct kvm_memory_slot *new,
4526 				enum kvm_mr_change change)
4527 {
4528 	int rc = 0;
4529 
4530 	switch (change) {
4531 	case KVM_MR_DELETE:
4532 		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
4533 					old->npages * PAGE_SIZE);
4534 		break;
4535 	case KVM_MR_MOVE:
4536 		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
4537 					old->npages * PAGE_SIZE);
4538 		if (rc)
4539 			break;
4540 		/* FALLTHROUGH */
4541 	case KVM_MR_CREATE:
4542 		rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
4543 				      mem->guest_phys_addr, mem->memory_size);
4544 		break;
4545 	case KVM_MR_FLAGS_ONLY:
4546 		break;
4547 	default:
4548 		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
4549 	}
4550 	if (rc)
4551 		pr_warn("failed to commit memory region\n");
4552 	return;
4553 }
4554 
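/*
 * Derive from the SCLP hmfai field which bits of stfle facility word i
 * may be offered to guests by default; only these are merged into
 * kvm_s390_fac_base at module init time.
 */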
4555 static inline unsigned long nonhyp_mask(int i)
4556 {
4557 	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
4558 
4559 	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
4560 }
4561 
4562 void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
4563 {
4564 	vcpu->valid_wakeup = false;
4565 }
4566 
4567 static int __init kvm_s390_init(void)
4568 {
4569 	int i;
4570 
4571 	if (!sclp.has_sief2) {
4572 		pr_info("SIE is not available\n");
4573 		return -ENODEV;
4574 	}
4575 
4576 	if (nested && hpage) {
4577 		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
4578 		return -EINVAL;
4579 	}
4580 
4581 	for (i = 0; i < 16; i++)
4582 		kvm_s390_fac_base[i] |=
4583 			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
4584 
4585 	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
4586 }
4587 
4588 static void __exit kvm_s390_exit(void)
4589 {
4590 	kvm_exit();
4591 }
4592 
4593 module_init(kvm_s390_init);
4594 module_exit(kvm_s390_exit);
4595 
4596 /*
4597  * Enable autoloading of the kvm module.
4598  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
4599  * since x86 takes a different approach.
4600  */
4601 #include <linux/miscdevice.h>
4602 MODULE_ALIAS_MISCDEV(KVM_MINOR);
4603 MODULE_ALIAS("devname:kvm");
4604