// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/pgtable.h>
#include <linux/mmu_notifier.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include <asm/uv.h>
#include <asm/fpu/api.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "pci.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, inject_io),
	STATS_DESC_COUNTER(VM, inject_float_mchk),
	STATS_DESC_COUNTER(VM, inject_pfault_done),
	STATS_DESC_COUNTER(VM, inject_service_signal),
	STATS_DESC_COUNTER(VM, inject_virtio),
	STATS_DESC_COUNTER(VM, aen_forward)
};

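/*
 * Layout of the binary stats file read by user space: the header is
 * followed by the NUL-padded id string (KVM_STATS_NAME_SIZE bytes),
 * then the descriptor array, then the stats data itself. The offsets
 * below simply encode that sequence.
 */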
const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, exit_userspace),
	STATS_DESC_COUNTER(VCPU, exit_null),
	STATS_DESC_COUNTER(VCPU, exit_external_request),
	STATS_DESC_COUNTER(VCPU, exit_io_request),
	STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
	STATS_DESC_COUNTER(VCPU, exit_stop_request),
	STATS_DESC_COUNTER(VCPU, exit_validity),
	STATS_DESC_COUNTER(VCPU, exit_instruction),
	STATS_DESC_COUNTER(VCPU, exit_pei),
	STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
	STATS_DESC_COUNTER(VCPU, instruction_lctl),
	STATS_DESC_COUNTER(VCPU, instruction_lctlg),
	STATS_DESC_COUNTER(VCPU, instruction_stctl),
	STATS_DESC_COUNTER(VCPU, instruction_stctg),
	STATS_DESC_COUNTER(VCPU, exit_program_interruption),
	STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
	STATS_DESC_COUNTER(VCPU, exit_operation_exception),
	STATS_DESC_COUNTER(VCPU, deliver_ckc),
	STATS_DESC_COUNTER(VCPU, deliver_cputm),
	STATS_DESC_COUNTER(VCPU, deliver_external_call),
	STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
	STATS_DESC_COUNTER(VCPU, deliver_service_signal),
	STATS_DESC_COUNTER(VCPU, deliver_virtio),
	STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
	STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
	STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
	STATS_DESC_COUNTER(VCPU, deliver_program),
	STATS_DESC_COUNTER(VCPU, deliver_io),
	STATS_DESC_COUNTER(VCPU, deliver_machine_check),
	STATS_DESC_COUNTER(VCPU, exit_wait_state),
	STATS_DESC_COUNTER(VCPU, inject_ckc),
	STATS_DESC_COUNTER(VCPU, inject_cputm),
	STATS_DESC_COUNTER(VCPU, inject_external_call),
	STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
	STATS_DESC_COUNTER(VCPU, inject_mchk),
	STATS_DESC_COUNTER(VCPU, inject_pfault_init),
	STATS_DESC_COUNTER(VCPU, inject_program),
	STATS_DESC_COUNTER(VCPU, inject_restart),
	STATS_DESC_COUNTER(VCPU, inject_set_prefix),
	STATS_DESC_COUNTER(VCPU, inject_stop_signal),
	STATS_DESC_COUNTER(VCPU, instruction_epsw),
	STATS_DESC_COUNTER(VCPU, instruction_gs),
	STATS_DESC_COUNTER(VCPU, instruction_io_other),
	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
	STATS_DESC_COUNTER(VCPU, instruction_ptff),
	STATS_DESC_COUNTER(VCPU, instruction_sck),
	STATS_DESC_COUNTER(VCPU, instruction_sckpf),
	STATS_DESC_COUNTER(VCPU, instruction_stidp),
	STATS_DESC_COUNTER(VCPU, instruction_spx),
	STATS_DESC_COUNTER(VCPU, instruction_stpx),
	STATS_DESC_COUNTER(VCPU, instruction_stap),
	STATS_DESC_COUNTER(VCPU, instruction_iske),
	STATS_DESC_COUNTER(VCPU, instruction_ri),
	STATS_DESC_COUNTER(VCPU, instruction_rrbe),
	STATS_DESC_COUNTER(VCPU, instruction_sske),
	STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
	STATS_DESC_COUNTER(VCPU, instruction_stsi),
	STATS_DESC_COUNTER(VCPU, instruction_stfl),
	STATS_DESC_COUNTER(VCPU, instruction_tb),
	STATS_DESC_COUNTER(VCPU, instruction_tpi),
	STATS_DESC_COUNTER(VCPU, instruction_tprot),
	STATS_DESC_COUNTER(VCPU, instruction_tsch),
	STATS_DESC_COUNTER(VCPU, instruction_sie),
	STATS_DESC_COUNTER(VCPU, instruction_essa),
	STATS_DESC_COUNTER(VCPU, instruction_sthyi),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
	STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
	STATS_DESC_COUNTER(VCPU, diag_9c_forward),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
	STATS_DESC_COUNTER(VCPU, pfault_sync)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling.  >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");

/* maximum diag9c forwarding per second */
unsigned int diag9c_forwarding_hz;
module_param(diag9c_forwarding_hz, uint, 0644);
MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");

/*
 * allow asynchronous deinit for protected guests; enable by default since
 * the feature is opt-in anyway
 */
static int async_destroy = 1;
module_param(async_destroy, int, 0444);
MODULE_PARM_DESC(async_destroy, "Asynchronous destroy for protected guests");

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. Going beyond this would
 * require code changes, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		sizeof(stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;

/* Section: not file related */
/* forward declarations */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);
static int sca_switch_to_extended(struct kvm *kvm);

static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

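	/*
	 * epoch and epdx together form a 128-bit signed epoch (epdx is
	 * the high word, only used when the multiple-epoch facility is
	 * active). Add the sign-extended delta and detect a carry out
	 * of the low word: if the sum wrapped around, the new epoch is
	 * (unsigned) smaller than the delta that was just added.
	 */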
	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

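/*
 * PERFORM LOCKED OPERATION with bit 0x100 set in the function code acts
 * as a "test bit" query: condition code 0 indicates that the PLO
 * function encoded in the low byte is installed.
 */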
static inline int plo_test_bit(unsigned char nr)
{
	unsigned long function = (unsigned long)nr | 0x100;
	int cc;

	asm volatile(
		"	lgr	0,%[function]\n"
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: [function] "d" (function)
		: "cc", "0");
	return cc == 0;
}

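/*
 * Issue the query function (GR0 == 0) of an RRF-format instruction and
 * store the returned parameter block at *query (address passed in GR1).
 * Used for instructions such as SORTL and DFLTCC below, which have no
 * cpacf query wrapper.
 */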
static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
	asm volatile(
		"	lghi	0,0\n"
		"	lgr	1,%[query]\n"
		/* Parameter registers are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		:
		: [query] "d" ((unsigned long)query), [opc] "i" (opcode)
		: "cc", "memory", "0", "1");
}

#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939

static void __init kvm_s390_cpu_feat_init(void)
{
	int i;

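	/*
	 * Probe all 256 possible PLO functions. The result is a bitmap
	 * stored MSB-first: function 0 maps to bit 0x80 of byte 0,
	 * function 7 to bit 0x01, and so on.
	 */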
	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will cause
	 * pages to be detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

static int __init __kvm_s390_init(void)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf_uv)
		goto err_kvm_uv;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
		goto err_debug_view;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto err_flic;
	}

	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
		rc = kvm_s390_pci_init();
		if (rc) {
			pr_err("Unable to allocate AIFT for PCI\n");
			goto err_pci;
		}
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto err_gib;

	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);

	return 0;

err_gib:
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_exit();
err_pci:
err_flic:
err_debug_view:
	debug_unregister(kvm_s390_dbf_uv);
err_kvm_uv:
	debug_unregister(kvm_s390_dbf);
	return rc;
}

static void __kvm_s390_exit(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);

	kvm_s390_gib_destroy();
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_exit();
	debug_unregister(kvm_s390_dbf);
	debug_unregister(kvm_s390_dbf_uv);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_S390_DIAG318:
	case KVM_CAP_IRQFD_RESAMPLE:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		r = KVM_GUESTDBG_VALID_MASK;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_S390_MEM_OP_EXTENSION:
		/*
		 * Flag bits indicating which extensions are supported.
		 * If r > 0, the base extension must also be supported/indicated,
		 * in order to maintain backwards compatibility.
		 */
		r = KVM_S390_MEMOP_EXTENSION_CAP_BASE |
		    KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG;
		break;
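	/*
	 * The vCPU limit follows from the SCA format: a basic SCA has
	 * 64 entries, an extended SCA 248. If the SCA entries are not
	 * used at all (cf. kvm_s390_use_sca_entries()), KVM_MAX_VCPUS
	 * applies.
	 */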
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		if (ext == KVM_CAP_NR_VCPUS)
			r = min_t(unsigned int, num_online_cpus(), r);
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	case KVM_CAP_S390_PROTECTED_ASYNC_DISABLE:
		r = async_destroy && is_prot_virt_host();
		break;
	case KVM_CAP_S390_PROTECTED:
		r = is_prot_virt_host();
		break;
	case KVM_CAP_S390_PROTECTED_DUMP: {
		u64 pv_cmds_dump[] = {
			BIT_UVC_CMD_DUMP_INIT,
			BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE,
			BIT_UVC_CMD_DUMP_CPU,
			BIT_UVC_CMD_DUMP_COMPLETE,
		};
		int i;

		r = is_prot_virt_host();

		for (i = 0; i < ARRAY_SIZE(pv_cmds_dump); i++) {
			if (!test_bit_inv(pv_cmds_dump[i],
					  (unsigned long *)&uv_info.inst_calls_list)) {
				r = 0;
				break;
			}
		}
		break;
	}
	case KVM_CAP_S390_ZPCI_OP:
		r = kvm_s390_pci_interp_allowed();
		break;
	case KVM_CAP_S390_CPU_TOPOLOGY:
		r = test_facility(11);
		break;
	default:
		r = 0;
	}
	return r;
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			if (test_facility(192)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 192);
				set_kvm_facility(kvm->arch.model.fac_list, 192);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			mmap_write_lock(kvm->mm);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			mmap_write_unlock(kvm->mm);
			/*
			 * We might have to create fake 4k page
			 * tables. To prevent the hardware from working
			 * on stale PGSTEs, we emulate these
			 * instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	case KVM_CAP_S390_CPU_TOPOLOGY:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(11)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 11);
			set_kvm_facility(kvm->arch.model.fac_list, 11);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_CPU_TOPOLOGY %s",
			 r ? "(not available)" : "(success)");
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_vcpu_pci_setup(struct kvm_vcpu *vcpu)
{
	/* Only set the ECB bits after guest requests zPCI interpretation */
	if (!vcpu->kvm->arch.use_zpci_interp)
		return;

	vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI;
	vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI;
}

void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	lockdep_assert_held(&kvm->lock);

	if (!kvm_s390_pci_interp_allowed())
		return;

	/*
	 * If the host is configured for PCI and the necessary facilities
	 * are available, turn on interpretation for the life of this guest.
	 */
	kvm->arch.use_zpci_interp = 1;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_pci_setup(vcpu);
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	unsigned long cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int bkt;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || kvm_memslots_empty(slots))
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	kvm_for_each_memslot(ms, bkt, slots) {
		if (!ms->dirty_bitmap)
			return -EINVAL;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	__kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
					   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	__kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	mutex_lock(&kvm->lock);
	/*
	 * For protected guests, the TOD is managed by the ultravisor, so trying
	 * to change it will never bring the expected results.
	 */
	if (kvm_s390_pv_is_protected(kvm)) {
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

out_unlock:
	mutex_unlock(&kvm->lock);
	return ret;
}

static void kvm_s390_get_tod_clock(struct kvm *kvm,
				   struct kvm_s390_vm_tod_clock *gtod)
{
	union tod_clock clk;

	preempt_disable();

	store_tod_clock_ext(&clk);

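	/*
	 * The guest TOD is the host TOD plus the VM's epoch. As in
	 * kvm_clock_sync_scb(), a wraparound of the low 64 bits
	 * (gtod->tod < clk.tod) means a carry into the epoch index.
	 */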
	gtod->tod = clk.tod + kvm->arch.epoch;
	gtod->epoch_idx = 0;
	if (test_kvm_facility(kvm, 139)) {
		gtod->epoch_idx = clk.ei + kvm->arch.epdx;
		if (gtod->tod < clk.tod)
			gtod->epoch_idx += 1;
	}

	preempt_enable();
}

static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));
	kvm_s390_get_tod_clock(kvm, &gtod);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
					 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
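		/*
		 * Clamp the requested IBC value to the range the
		 * machine reports via SCLP: between the lowest and the
		 * unblocked (highest usable) IBC.
		 */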
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
			 data.feat[0],
			 data.feat[1],
			 data.feat[2]);
	return 0;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
		mutex_unlock(&kvm->lock);
		return -EFAULT;
	}
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "SET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "SET: guest KM     subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "SET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "SET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "SET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "SET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "SET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL_ACCOUNT);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,
	       sizeof(stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc:  0x%4.4x, host cpuid:  0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask:  0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist:  0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
			 data.feat[0],
			 data.feat[1],
			 data.feat[2]);
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_to_arr64(data.feat, kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: host feat:  0x%16.16llx.0x%16.16llx.0x%16.16llx",
			 data.feat[0],
			 data.feat[1],
			 data.feat[2]);
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "GET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: guest KM     subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "GET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "GET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: host  PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
	VM_EVENT(kvm, 3, "GET: host  PTFF   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: host  KMAC   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: host  KMC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: host  KM     subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
	VM_EVENT(kvm, 3, "GET: host  KIMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: host  KLMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: host  PCKMO  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
1744 		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
1745 	VM_EVENT(kvm, 3, "GET: host  KMCTR  subfunc 0x%16.16lx.%16.16lx",
1746 		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
1747 		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
1748 	VM_EVENT(kvm, 3, "GET: host  KMF    subfunc 0x%16.16lx.%16.16lx",
1749 		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
1750 		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
1751 	VM_EVENT(kvm, 3, "GET: host  KMO    subfunc 0x%16.16lx.%16.16lx",
1752 		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
1753 		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
1754 	VM_EVENT(kvm, 3, "GET: host  PCC    subfunc 0x%16.16lx.%16.16lx",
1755 		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
1756 		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
1757 	VM_EVENT(kvm, 3, "GET: host  PPNO   subfunc 0x%16.16lx.%16.16lx",
1758 		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
1759 		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
1760 	VM_EVENT(kvm, 3, "GET: host  KMA    subfunc 0x%16.16lx.%16.16lx",
1761 		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
1762 		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
1763 	VM_EVENT(kvm, 3, "GET: host  KDSA   subfunc 0x%16.16lx.%16.16lx",
1764 		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
1765 		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
1766 	VM_EVENT(kvm, 3, "GET: host  SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1767 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
1768 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
1769 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
1770 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
1771 	VM_EVENT(kvm, 3, "GET: host  DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1772 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
1773 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
1774 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
1775 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
1776 
1777 	return 0;
1778 }
1779 
1780 static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1781 {
1782 	int ret = -ENXIO;
1783 
1784 	switch (attr->attr) {
1785 	case KVM_S390_VM_CPU_PROCESSOR:
1786 		ret = kvm_s390_get_processor(kvm, attr);
1787 		break;
1788 	case KVM_S390_VM_CPU_MACHINE:
1789 		ret = kvm_s390_get_machine(kvm, attr);
1790 		break;
1791 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1792 		ret = kvm_s390_get_processor_feat(kvm, attr);
1793 		break;
1794 	case KVM_S390_VM_CPU_MACHINE_FEAT:
1795 		ret = kvm_s390_get_machine_feat(kvm, attr);
1796 		break;
1797 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1798 		ret = kvm_s390_get_processor_subfunc(kvm, attr);
1799 		break;
1800 	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1801 		ret = kvm_s390_get_machine_subfunc(kvm, attr);
1802 		break;
1803 	}
1804 	return ret;
1805 }
1806 
1807 /**
1808  * kvm_s390_update_topology_change_report - update CPU topology change report
1809  * @kvm: guest KVM description
1810  * @val: set or clear the MTCR bit
1811  *
1812  * Updates the Multiprocessor Topology-Change-Report bit to signal
1813  * a topology change to the guest.
1814  * This is only relevant if the topology facility is present.
1815  *
1816  * The SCA version, bsca or esca, doesn't matter as the offset is the same.
1817  */
1818 static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val)
1819 {
1820 	union sca_utility new, old;
1821 	struct bsca_block *sca;
1822 
1823 	read_lock(&kvm->arch.sca_lock);
1824 	sca = kvm->arch.sca;
1825 	do {
1826 		old = READ_ONCE(sca->utility);
1827 		new = old;
1828 		new.mtcr = val;
1829 	} while (cmpxchg(&sca->utility.val, old.val, new.val) != old.val);
1830 	read_unlock(&kvm->arch.sca_lock);
1831 }
1832 
1833 static int kvm_s390_set_topo_change_indication(struct kvm *kvm,
1834 					       struct kvm_device_attr *attr)
1835 {
1836 	if (!test_kvm_facility(kvm, 11))
1837 		return -ENXIO;
1838 
1839 	kvm_s390_update_topology_change_report(kvm, !!attr->attr);
1840 	return 0;
1841 }
1842 
1843 static int kvm_s390_get_topo_change_indication(struct kvm *kvm,
1844 					       struct kvm_device_attr *attr)
1845 {
1846 	u8 topo;
1847 
1848 	if (!test_kvm_facility(kvm, 11))
1849 		return -ENXIO;
1850 
1851 	read_lock(&kvm->arch.sca_lock);
1852 	topo = ((struct bsca_block *)kvm->arch.sca)->utility.mtcr;
1853 	read_unlock(&kvm->arch.sca_lock);
1854 
1855 	return put_user(topo, (u8 __user *)attr->addr);
1856 }
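
/*
 * Editorial example (not part of this file): userspace drives the MTCR
 * handlers above through the KVM_S390_VM_CPU_TOPOLOGY attribute group.
 * A minimal sketch, assuming vm_fd is an open VM fd; both calls fail
 * with -ENXIO when facility 11 is absent:
 *
 *	__u8 mtcr;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_CPU_TOPOLOGY,
 *		.attr  = 1,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 *	attr.attr = 0;
 *	attr.addr = (__u64)(unsigned long)&mtcr;
 *	ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
 */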
1857 
1858 static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1859 {
1860 	int ret;
1861 
1862 	switch (attr->group) {
1863 	case KVM_S390_VM_MEM_CTRL:
1864 		ret = kvm_s390_set_mem_control(kvm, attr);
1865 		break;
1866 	case KVM_S390_VM_TOD:
1867 		ret = kvm_s390_set_tod(kvm, attr);
1868 		break;
1869 	case KVM_S390_VM_CPU_MODEL:
1870 		ret = kvm_s390_set_cpu_model(kvm, attr);
1871 		break;
1872 	case KVM_S390_VM_CRYPTO:
1873 		ret = kvm_s390_vm_set_crypto(kvm, attr);
1874 		break;
1875 	case KVM_S390_VM_MIGRATION:
1876 		ret = kvm_s390_vm_set_migration(kvm, attr);
1877 		break;
1878 	case KVM_S390_VM_CPU_TOPOLOGY:
1879 		ret = kvm_s390_set_topo_change_indication(kvm, attr);
1880 		break;
1881 	default:
1882 		ret = -ENXIO;
1883 		break;
1884 	}
1885 
1886 	return ret;
1887 }
1888 
1889 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1890 {
1891 	int ret;
1892 
1893 	switch (attr->group) {
1894 	case KVM_S390_VM_MEM_CTRL:
1895 		ret = kvm_s390_get_mem_control(kvm, attr);
1896 		break;
1897 	case KVM_S390_VM_TOD:
1898 		ret = kvm_s390_get_tod(kvm, attr);
1899 		break;
1900 	case KVM_S390_VM_CPU_MODEL:
1901 		ret = kvm_s390_get_cpu_model(kvm, attr);
1902 		break;
1903 	case KVM_S390_VM_MIGRATION:
1904 		ret = kvm_s390_vm_get_migration(kvm, attr);
1905 		break;
1906 	case KVM_S390_VM_CPU_TOPOLOGY:
1907 		ret = kvm_s390_get_topo_change_indication(kvm, attr);
1908 		break;
1909 	default:
1910 		ret = -ENXIO;
1911 		break;
1912 	}
1913 
1914 	return ret;
1915 }
1916 
1917 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1918 {
1919 	int ret;
1920 
1921 	switch (attr->group) {
1922 	case KVM_S390_VM_MEM_CTRL:
1923 		switch (attr->attr) {
1924 		case KVM_S390_VM_MEM_ENABLE_CMMA:
1925 		case KVM_S390_VM_MEM_CLR_CMMA:
1926 			ret = sclp.has_cmma ? 0 : -ENXIO;
1927 			break;
1928 		case KVM_S390_VM_MEM_LIMIT_SIZE:
1929 			ret = 0;
1930 			break;
1931 		default:
1932 			ret = -ENXIO;
1933 			break;
1934 		}
1935 		break;
1936 	case KVM_S390_VM_TOD:
1937 		switch (attr->attr) {
1938 		case KVM_S390_VM_TOD_LOW:
1939 		case KVM_S390_VM_TOD_HIGH:
1940 			ret = 0;
1941 			break;
1942 		default:
1943 			ret = -ENXIO;
1944 			break;
1945 		}
1946 		break;
1947 	case KVM_S390_VM_CPU_MODEL:
1948 		switch (attr->attr) {
1949 		case KVM_S390_VM_CPU_PROCESSOR:
1950 		case KVM_S390_VM_CPU_MACHINE:
1951 		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1952 		case KVM_S390_VM_CPU_MACHINE_FEAT:
1953 		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1954 		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1955 			ret = 0;
1956 			break;
1957 		default:
1958 			ret = -ENXIO;
1959 			break;
1960 		}
1961 		break;
1962 	case KVM_S390_VM_CRYPTO:
1963 		switch (attr->attr) {
1964 		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1965 		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1966 		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1967 		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1968 			ret = 0;
1969 			break;
1970 		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
1971 		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
1972 			ret = ap_instructions_available() ? 0 : -ENXIO;
1973 			break;
1974 		default:
1975 			ret = -ENXIO;
1976 			break;
1977 		}
1978 		break;
1979 	case KVM_S390_VM_MIGRATION:
1980 		ret = 0;
1981 		break;
1982 	case KVM_S390_VM_CPU_TOPOLOGY:
1983 		ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO;
1984 		break;
1985 	default:
1986 		ret = -ENXIO;
1987 		break;
1988 	}
1989 
1990 	return ret;
1991 }
1992 
1993 static int kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1994 {
1995 	uint8_t *keys;
1996 	uint64_t hva;
1997 	int srcu_idx, i, r = 0;
1998 
1999 	if (args->flags != 0)
2000 		return -EINVAL;
2001 
2002 	/* Is this guest using storage keys? */
2003 	if (!mm_uses_skeys(current->mm))
2004 		return KVM_S390_GET_SKEYS_NONE;
2005 
2006 	/* Enforce sane limit on memory allocation */
2007 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
2008 		return -EINVAL;
2009 
2010 	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
2011 	if (!keys)
2012 		return -ENOMEM;
2013 
2014 	mmap_read_lock(current->mm);
2015 	srcu_idx = srcu_read_lock(&kvm->srcu);
2016 	for (i = 0; i < args->count; i++) {
2017 		hva = gfn_to_hva(kvm, args->start_gfn + i);
2018 		if (kvm_is_error_hva(hva)) {
2019 			r = -EFAULT;
2020 			break;
2021 		}
2022 
2023 		r = get_guest_storage_key(current->mm, hva, &keys[i]);
2024 		if (r)
2025 			break;
2026 	}
2027 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2028 	mmap_read_unlock(current->mm);
2029 
2030 	if (!r) {
2031 		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
2032 				 sizeof(uint8_t) * args->count);
2033 		if (r)
2034 			r = -EFAULT;
2035 	}
2036 
2037 	kvfree(keys);
2038 	return r;
2039 }
2040 
2041 static int kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
2042 {
2043 	uint8_t *keys;
2044 	uint64_t hva;
2045 	int srcu_idx, i, r = 0;
2046 	bool unlocked;
2047 
2048 	if (args->flags != 0)
2049 		return -EINVAL;
2050 
2051 	/* Enforce sane limit on memory allocation */
2052 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
2053 		return -EINVAL;
2054 
2055 	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
2056 	if (!keys)
2057 		return -ENOMEM;
2058 
2059 	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
2060 			   sizeof(uint8_t) * args->count);
2061 	if (r) {
2062 		r = -EFAULT;
2063 		goto out;
2064 	}
2065 
2066 	/* Enable storage key handling for the guest */
2067 	r = s390_enable_skey();
2068 	if (r)
2069 		goto out;
2070 
2071 	i = 0;
2072 	mmap_read_lock(current->mm);
2073 	srcu_idx = srcu_read_lock(&kvm->srcu);
2074 	while (i < args->count) {
2075 		unlocked = false;
2076 		hva = gfn_to_hva(kvm, args->start_gfn + i);
2077 		if (kvm_is_error_hva(hva)) {
2078 			r = -EFAULT;
2079 			break;
2080 		}
2081 
2082 		/* Lowest order bit is reserved */
2083 		if (keys[i] & 0x01) {
2084 			r = -EINVAL;
2085 			break;
2086 		}
2087 
2088 		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
2089 		if (r) {
2090 			r = fixup_user_fault(current->mm, hva,
2091 					     FAULT_FLAG_WRITE, &unlocked);
2092 			if (r)
2093 				break;
2094 		}
2095 		if (!r)
2096 			i++;
2097 	}
2098 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2099 	mmap_read_unlock(current->mm);
2100 out:
2101 	kvfree(keys);
2102 	return r;
2103 }
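
/*
 * Editorial example (not part of this file): the two storage key ioctls
 * above pair up for migration. A minimal sketch, assuming src_vm_fd and
 * dst_vm_fd are open VM fds on the source and target:
 *
 *	uint8_t keys[256];
 *	struct kvm_s390_skeys args = {
 *		.start_gfn = 0,
 *		.count = sizeof(keys),
 *		.skeydata_addr = (__u64)(unsigned long)keys,
 *	};
 *
 *	int r = ioctl(src_vm_fd, KVM_S390_GET_SKEYS, &args);
 *	if (r == 0)
 *		r = ioctl(dst_vm_fd, KVM_S390_SET_SKEYS, &args);
 *
 * A return of KVM_S390_GET_SKEYS_NONE from the GET means the guest never
 * used storage keys and nothing needs to be transferred.
 */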
2104 
2105 /*
2106  * Base address and length must be sent at the start of each block; it's
2107  * therefore cheaper to send some clean data, as long as it's less than the
2108  * size of two longs.
2109  */
2110 #define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
2111 /* for consistency */
2112 #define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
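
/*
 * Worked example of the trade-off above (editorial note, assuming a 64-bit
 * host): a block header costs two longs, i.e. 16 bytes. Since each CMMA
 * value is one byte, folding a run of up to 16 clean pages into the
 * current block costs at most 16 bytes, which is never worse than the
 * 16-byte header a fresh block would require.
 */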
2113 
2114 static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2115 			      u8 *res, unsigned long bufsize)
2116 {
2117 	unsigned long pgstev, hva, cur_gfn = args->start_gfn;
2118 
2119 	args->count = 0;
2120 	while (args->count < bufsize) {
2121 		hva = gfn_to_hva(kvm, cur_gfn);
2122 		/*
2123 		 * We return an error if the first value was invalid, but we
2124 		 * return successfully if at least one value was copied.
2125 		 */
2126 		if (kvm_is_error_hva(hva))
2127 			return args->count ? 0 : -EFAULT;
2128 		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2129 			pgstev = 0;
2130 		res[args->count++] = (pgstev >> 24) & 0x43;
2131 		cur_gfn++;
2132 	}
2133 
2134 	return 0;
2135 }
2136 
2137 static struct kvm_memory_slot *gfn_to_memslot_approx(struct kvm_memslots *slots,
2138 						     gfn_t gfn)
2139 {
2140 	return ____gfn_to_memslot(slots, gfn, true);
2141 }
2142 
2143 static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
2144 					      unsigned long cur_gfn)
2145 {
2146 	struct kvm_memory_slot *ms = gfn_to_memslot_approx(slots, cur_gfn);
2147 	unsigned long ofs = cur_gfn - ms->base_gfn;
2148 	struct rb_node *mnode = &ms->gfn_node[slots->node_idx];
2149 
2150 	if (ms->base_gfn + ms->npages <= cur_gfn) {
2151 		mnode = rb_next(mnode);
2152 		/* If we are above the highest slot, wrap around */
2153 		if (!mnode)
2154 			mnode = rb_first(&slots->gfn_tree);
2155 
2156 		ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
2157 		ofs = 0;
2158 	}
2159 	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
2160 	while (ofs >= ms->npages && (mnode = rb_next(mnode))) {
2161 		ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
2162 		ofs = find_first_bit(kvm_second_dirty_bitmap(ms), ms->npages);
2163 	}
2164 	return ms->base_gfn + ofs;
2165 }
2166 
2167 static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2168 			     u8 *res, unsigned long bufsize)
2169 {
2170 	unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
2171 	struct kvm_memslots *slots = kvm_memslots(kvm);
2172 	struct kvm_memory_slot *ms;
2173 
2174 	if (unlikely(kvm_memslots_empty(slots)))
2175 		return 0;
2176 
2177 	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
2178 	ms = gfn_to_memslot(kvm, cur_gfn);
2179 	args->count = 0;
2180 	args->start_gfn = cur_gfn;
2181 	if (!ms)
2182 		return 0;
2183 	next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2184 	mem_end = kvm_s390_get_gfn_end(slots);
2185 
2186 	while (args->count < bufsize) {
2187 		hva = gfn_to_hva(kvm, cur_gfn);
2188 		if (kvm_is_error_hva(hva))
2189 			return 0;
2190 		/* Decrement only if we actually flipped the bit to 0 */
2191 		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
2192 			atomic64_dec(&kvm->arch.cmma_dirty_pages);
2193 		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2194 			pgstev = 0;
2195 		/* Save the value */
2196 		res[args->count++] = (pgstev >> 24) & 0x43;
2197 		/* If the next bit is too far away, stop. */
2198 		if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
2199 			return 0;
2200 		/* If we reached the previous "next", find the next one */
2201 		if (cur_gfn == next_gfn)
2202 			next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2203 		/* Reached the end of memory or of the buffer, stop */
2204 		if ((next_gfn >= mem_end) ||
2205 		    (next_gfn - args->start_gfn >= bufsize))
2206 			return 0;
2207 		cur_gfn++;
2208 		/* Reached the end of the current memslot, take the next one. */
2209 		if (cur_gfn - ms->base_gfn >= ms->npages) {
2210 			ms = gfn_to_memslot(kvm, cur_gfn);
2211 			if (!ms)
2212 				return 0;
2213 		}
2214 	}
2215 	return 0;
2216 }
2217 
2218 /*
2219  * This function searches for the next page with dirty CMMA attributes, and
2220  * saves the attributes in the buffer up to either the end of the buffer or
2221  * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
2222  * no trailing clean bytes are saved.
2223  * In case no dirty bits were found, or if CMMA was not enabled or used, the
2224  * output buffer will indicate 0 as length.
2225  */
2226 static int kvm_s390_get_cmma_bits(struct kvm *kvm,
2227 				  struct kvm_s390_cmma_log *args)
2228 {
2229 	unsigned long bufsize;
2230 	int srcu_idx, peek, ret;
2231 	u8 *values;
2232 
2233 	if (!kvm->arch.use_cmma)
2234 		return -ENXIO;
2235 	/* Invalid/unsupported flags were specified */
2236 	if (args->flags & ~KVM_S390_CMMA_PEEK)
2237 		return -EINVAL;
2238 	/* Migration mode query, and we are not doing a migration */
2239 	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
2240 	if (!peek && !kvm->arch.migration_mode)
2241 		return -EINVAL;
2242 	/* CMMA is disabled or was not used, or the buffer has length zero */
2243 	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
2244 	if (!bufsize || !kvm->mm->context.uses_cmm) {
2245 		memset(args, 0, sizeof(*args));
2246 		return 0;
2247 	}
2248 	/* We are not peeking, and there are no dirty pages */
2249 	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
2250 		memset(args, 0, sizeof(*args));
2251 		return 0;
2252 	}
2253 
2254 	values = vmalloc(bufsize);
2255 	if (!values)
2256 		return -ENOMEM;
2257 
2258 	mmap_read_lock(kvm->mm);
2259 	srcu_idx = srcu_read_lock(&kvm->srcu);
2260 	if (peek)
2261 		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
2262 	else
2263 		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
2264 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2265 	mmap_read_unlock(kvm->mm);
2266 
2267 	if (kvm->arch.migration_mode)
2268 		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2269 	else
2270 		args->remaining = 0;
2271 
2272 	if (copy_to_user((void __user *)args->values, values, args->count))
2273 		ret = -EFAULT;
2274 
2275 	vfree(values);
2276 	return ret;
2277 }
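
/*
 * Editorial example (not part of this file): a migration loop draining the
 * dirty CMMA log with KVM_S390_GET_CMMA_BITS. A minimal sketch, assuming
 * vm_fd is an open VM fd and send_cmma_block() is a hypothetical stream
 * writer; error handling is elided:
 *
 *	uint8_t values[4096];
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = 0,
 *		.count = sizeof(values),
 *		.values = (__u64)(unsigned long)values,
 *	};
 *
 *	do {
 *		ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log);
 *		send_cmma_block(log.start_gfn, log.count, values);
 *		log.start_gfn += log.count;
 *		log.count = sizeof(values);
 *	} while (log.remaining);
 *
 * The kernel rewrites start_gfn to the first dirty gfn it found, so the
 * loop skips clean ranges automatically.
 */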
2278 
2279 /*
2280  * This function sets the CMMA attributes for the given pages. If the input
2281  * buffer has zero length, no action is taken, otherwise the attributes are
2282  * set and the mm->context.uses_cmm flag is set.
2283  */
2284 static int kvm_s390_set_cmma_bits(struct kvm *kvm,
2285 				  const struct kvm_s390_cmma_log *args)
2286 {
2287 	unsigned long hva, mask, pgstev, i;
2288 	uint8_t *bits;
2289 	int srcu_idx, r = 0;
2290 
2291 	mask = args->mask;
2292 
2293 	if (!kvm->arch.use_cmma)
2294 		return -ENXIO;
2295 	/* invalid/unsupported flags */
2296 	if (args->flags != 0)
2297 		return -EINVAL;
2298 	/* Enforce sane limit on memory allocation */
2299 	if (args->count > KVM_S390_CMMA_SIZE_MAX)
2300 		return -EINVAL;
2301 	/* Nothing to do */
2302 	if (args->count == 0)
2303 		return 0;
2304 
2305 	bits = vmalloc(array_size(sizeof(*bits), args->count));
2306 	if (!bits)
2307 		return -ENOMEM;
2308 
2309 	r = copy_from_user(bits, (void __user *)args->values, args->count);
2310 	if (r) {
2311 		r = -EFAULT;
2312 		goto out;
2313 	}
2314 
2315 	mmap_read_lock(kvm->mm);
2316 	srcu_idx = srcu_read_lock(&kvm->srcu);
2317 	for (i = 0; i < args->count; i++) {
2318 		hva = gfn_to_hva(kvm, args->start_gfn + i);
2319 		if (kvm_is_error_hva(hva)) {
2320 			r = -EFAULT;
2321 			break;
2322 		}
2323 
2324 		pgstev = bits[i];
2325 		pgstev = pgstev << 24;
2326 		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
2327 		set_pgste_bits(kvm->mm, hva, mask, pgstev);
2328 	}
2329 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2330 	mmap_read_unlock(kvm->mm);
2331 
2332 	if (!kvm->mm->context.uses_cmm) {
2333 		mmap_write_lock(kvm->mm);
2334 		kvm->mm->context.uses_cmm = 1;
2335 		mmap_write_unlock(kvm->mm);
2336 	}
2337 out:
2338 	vfree(bits);
2339 	return r;
2340 }
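
/*
 * Editorial example (not part of this file): the receiving side of a
 * migration replays a block with KVM_S390_SET_CMMA_BITS. A minimal
 * sketch, assuming vm_fd, block_gfn, block_len and values come from the
 * migration stream; a mask of ~0ULL asks for all bits, which the code
 * above further restricts to the usage and NODAT bits:
 *
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = block_gfn,
 *		.count = block_len,
 *		.mask = ~0ULL,
 *		.values = (__u64)(unsigned long)values,
 *	};
 *
 *	ioctl(vm_fd, KVM_S390_SET_CMMA_BITS, &log);
 */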
2341 
2342 /**
2343  * kvm_s390_cpus_from_pv - Convert all protected vCPUs in a protected VM to
2344  * non-protected.
2345  * @kvm: the VM whose protected vCPUs are to be converted
2346  * @rc: return value for the RC field of the UVC (in case of error)
2347  * @rrc: return value for the RRC field of the UVC (in case of error)
2348  *
2349  * Does not stop in case of error, tries to convert as many
2350  * CPUs as possible. In case of error, the RC and RRC of the last error are
2351  * returned.
2352  *
2353  * Return: 0 in case of success, otherwise -EIO
2354  */
2355 int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
2356 {
2357 	struct kvm_vcpu *vcpu;
2358 	unsigned long i;
2359 	u16 _rc, _rrc;
2360 	int ret = 0;
2361 
2362 	/*
2363 	 * We ignore failures and try to destroy as many CPUs as possible.
2364 	 * At the same time we must not free the assigned resources when
2365 	 * this fails, as the ultravisor still has access to that memory.
2366 	 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
2367 	 * behind.
2368 	 * We want to return the first failure rc and rrc, though.
2369 	 */
2370 	kvm_for_each_vcpu(i, vcpu, kvm) {
2371 		mutex_lock(&vcpu->mutex);
2372 		if (kvm_s390_pv_destroy_cpu(vcpu, &_rc, &_rrc) && !ret) {
2373 			*rc = _rc;
2374 			*rrc = _rrc;
2375 			ret = -EIO;
2376 		}
2377 		mutex_unlock(&vcpu->mutex);
2378 	}
2379 	/* Ensure that we re-enable gisa if the non-PV guest used it but the PV guest did not. */
2380 	if (use_gisa)
2381 		kvm_s390_gisa_enable(kvm);
2382 	return ret;
2383 }
2384 
2385 /**
2386  * kvm_s390_cpus_to_pv - Convert all non-protected vCPUs in a protected VM
2387  * to protected.
2388  * @kvm: the VM whose protected vCPUs are to be converted
2389  * @rc: return value for the RC field of the UVC (in case of error)
2390  * @rrc: return value for the RRC field of the UVC (in case of error)
2391  *
2392  * Tries to undo the conversion in case of error.
2393  *
2394  * Return: 0 in case of success, otherwise -EIO
2395  */
2396 static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
2397 {
2398 	unsigned long i;
2399 	int r = 0;
2400 	u16 dummy;
2401 
2402 	struct kvm_vcpu *vcpu;
2403 
2404 	/* Disable the GISA if the ultravisor does not support AIV. */
2405 	if (!test_bit_inv(BIT_UV_FEAT_AIV, &uv_info.uv_feature_indications))
2406 		kvm_s390_gisa_disable(kvm);
2407 
2408 	kvm_for_each_vcpu(i, vcpu, kvm) {
2409 		mutex_lock(&vcpu->mutex);
2410 		r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
2411 		mutex_unlock(&vcpu->mutex);
2412 		if (r)
2413 			break;
2414 	}
2415 	if (r)
2416 		kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
2417 	return r;
2418 }
2419 
2420 /*
2421  * Here we provide user space with a direct interface to query UV
2422  * related data like UV maxima and available features as well as
2423  * feature specific data.
2424  *
2425  * To facilitate future extension of the data structures we'll try to
2426  * write data up to the maximum requested length.
2427  */
2428 static ssize_t kvm_s390_handle_pv_info(struct kvm_s390_pv_info *info)
2429 {
2430 	ssize_t len_min;
2431 
2432 	switch (info->header.id) {
2433 	case KVM_PV_INFO_VM: {
2434 		len_min =  sizeof(info->header) + sizeof(info->vm);
2435 
2436 		if (info->header.len_max < len_min)
2437 			return -EINVAL;
2438 
2439 		memcpy(info->vm.inst_calls_list,
2440 		       uv_info.inst_calls_list,
2441 		       sizeof(uv_info.inst_calls_list));
2442 
2443 		/* It's the max cpuid, not the max number of cpus, so it's off by one */
2444 		info->vm.max_cpus = uv_info.max_guest_cpu_id + 1;
2445 		info->vm.max_guests = uv_info.max_num_sec_conf;
2446 		info->vm.max_guest_addr = uv_info.max_sec_stor_addr;
2447 		info->vm.feature_indication = uv_info.uv_feature_indications;
2448 
2449 		return len_min;
2450 	}
2451 	case KVM_PV_INFO_DUMP: {
2452 		len_min =  sizeof(info->header) + sizeof(info->dump);
2453 
2454 		if (info->header.len_max < len_min)
2455 			return -EINVAL;
2456 
2457 		info->dump.dump_cpu_buffer_len = uv_info.guest_cpu_stor_len;
2458 		info->dump.dump_config_mem_buffer_per_1m = uv_info.conf_dump_storage_state_len;
2459 		info->dump.dump_config_finalize_len = uv_info.conf_dump_finalize_len;
2460 		return len_min;
2461 	}
2462 	default:
2463 		return -EINVAL;
2464 	}
2465 }
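
/*
 * Editorial example (not part of this file): the len_max/len_written
 * handshake above keeps the info structures extensible. A minimal sketch,
 * assuming vm_fd is an open VM fd; the caller announces how much it can
 * accept and checks how much was actually written:
 *
 *	struct kvm_s390_pv_info info = {
 *		.header.id = KVM_PV_INFO_VM,
 *		.header.len_max = sizeof(info.header) + sizeof(info.vm),
 *	};
 *	struct kvm_pv_cmd cmd = {
 *		.cmd = KVM_PV_INFO,
 *		.data = (__u64)(unsigned long)&info,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd) == 0 &&
 *	    info.header.len_written >= sizeof(info.header) + sizeof(info.vm))
 *		printf("max guests: %llu\n", info.vm.max_guests);
 */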
2466 
2467 static int kvm_s390_pv_dmp(struct kvm *kvm, struct kvm_pv_cmd *cmd,
2468 			   struct kvm_s390_pv_dmp dmp)
2469 {
2470 	int r = -EINVAL;
2471 	void __user *result_buff = (void __user *)dmp.buff_addr;
2472 
2473 	switch (dmp.subcmd) {
2474 	case KVM_PV_DUMP_INIT: {
2475 		if (kvm->arch.pv.dumping)
2476 			break;
2477 
2478 		/*
2479 		 * Block SIE entry as concurrent dump UVCs could lead
2480 		 * to validities.
2481 		 */
2482 		kvm_s390_vcpu_block_all(kvm);
2483 
2484 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2485 				  UVC_CMD_DUMP_INIT, &cmd->rc, &cmd->rrc);
2486 		KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP INIT: rc %x rrc %x",
2487 			     cmd->rc, cmd->rrc);
2488 		if (!r) {
2489 			kvm->arch.pv.dumping = true;
2490 		} else {
2491 			kvm_s390_vcpu_unblock_all(kvm);
2492 			r = -EINVAL;
2493 		}
2494 		break;
2495 	}
2496 	case KVM_PV_DUMP_CONFIG_STOR_STATE: {
2497 		if (!kvm->arch.pv.dumping)
2498 			break;
2499 
2500 		/*
2501 		 * gaddr is an output parameter since we might stop
2502 		 * early. As dmp will be copied back by our caller, we
2503 		 * don't need to do it ourselves.
2504 		 */
2505 		r = kvm_s390_pv_dump_stor_state(kvm, result_buff, &dmp.gaddr, dmp.buff_len,
2506 						&cmd->rc, &cmd->rrc);
2507 		break;
2508 	}
2509 	case KVM_PV_DUMP_COMPLETE: {
2510 		if (!kvm->arch.pv.dumping)
2511 			break;
2512 
2513 		r = -EINVAL;
2514 		if (dmp.buff_len < uv_info.conf_dump_finalize_len)
2515 			break;
2516 
2517 		r = kvm_s390_pv_dump_complete(kvm, result_buff,
2518 					      &cmd->rc, &cmd->rrc);
2519 		break;
2520 	}
2521 	default:
2522 		r = -ENOTTY;
2523 		break;
2524 	}
2525 
2526 	return r;
2527 }
2528 
2529 static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
2530 {
2531 	const bool need_lock = (cmd->cmd != KVM_PV_ASYNC_CLEANUP_PERFORM);
2532 	void __user *argp = (void __user *)cmd->data;
2533 	int r = 0;
2534 	u16 dummy;
2535 
2536 	if (need_lock)
2537 		mutex_lock(&kvm->lock);
2538 
2539 	switch (cmd->cmd) {
2540 	case KVM_PV_ENABLE: {
2541 		r = -EINVAL;
2542 		if (kvm_s390_pv_is_protected(kvm))
2543 			break;
2544 
2545 		/*
2546 		 *  FMT 4 SIE needs esca. As we never switch back to bsca from
2547 		 *  esca, we need no cleanup in the error cases below.
2548 		 */
2549 		r = sca_switch_to_extended(kvm);
2550 		if (r)
2551 			break;
2552 
2553 		mmap_write_lock(current->mm);
2554 		r = gmap_mark_unmergeable();
2555 		mmap_write_unlock(current->mm);
2556 		if (r)
2557 			break;
2558 
2559 		r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
2560 		if (r)
2561 			break;
2562 
2563 		r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
2564 		if (r)
2565 			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
2566 
2567 		/* we need to block service interrupts from now on */
2568 		set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2569 		break;
2570 	}
2571 	case KVM_PV_ASYNC_CLEANUP_PREPARE:
2572 		r = -EINVAL;
2573 		if (!kvm_s390_pv_is_protected(kvm) || !async_destroy)
2574 			break;
2575 
2576 		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
2577 		/*
2578 		 * If a CPU could not be destroyed, destroying the VM will also fail.
2579 		 * There is no point in trying to destroy it. Instead return
2580 		 * the rc and rrc from the first CPU that failed destroying.
2581 		 */
2582 		if (r)
2583 			break;
2584 		r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc);
2585 
2586 		/* no need to block service interrupts any more */
2587 		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2588 		break;
2589 	case KVM_PV_ASYNC_CLEANUP_PERFORM:
2590 		r = -EINVAL;
2591 		if (!async_destroy)
2592 			break;
2593 		/* kvm->lock must not be held; this is asserted inside the function. */
2594 		r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc);
2595 		break;
2596 	case KVM_PV_DISABLE: {
2597 		r = -EINVAL;
2598 		if (!kvm_s390_pv_is_protected(kvm))
2599 			break;
2600 
2601 		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
2602 		/*
2603 		 * If a CPU could not be destroyed, destroying the VM will also fail.
2604 		 * There is no point in trying to destroy it. Instead return
2605 		 * the rc and rrc from the first CPU that failed destroying.
2606 		 */
2607 		if (r)
2608 			break;
2609 		r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc);
2610 
2611 		/* no need to block service interrupts any more */
2612 		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2613 		break;
2614 	}
2615 	case KVM_PV_SET_SEC_PARMS: {
2616 		struct kvm_s390_pv_sec_parm parms = {};
2617 		void *hdr;
2618 
2619 		r = -EINVAL;
2620 		if (!kvm_s390_pv_is_protected(kvm))
2621 			break;
2622 
2623 		r = -EFAULT;
2624 		if (copy_from_user(&parms, argp, sizeof(parms)))
2625 			break;
2626 
2627 		/* Currently restricted to 8KB */
2628 		r = -EINVAL;
2629 		if (parms.length > PAGE_SIZE * 2)
2630 			break;
2631 
2632 		r = -ENOMEM;
2633 		hdr = vmalloc(parms.length);
2634 		if (!hdr)
2635 			break;
2636 
2637 		r = -EFAULT;
2638 		if (!copy_from_user(hdr, (void __user *)parms.origin,
2639 				    parms.length))
2640 			r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
2641 						      &cmd->rc, &cmd->rrc);
2642 
2643 		vfree(hdr);
2644 		break;
2645 	}
2646 	case KVM_PV_UNPACK: {
2647 		struct kvm_s390_pv_unp unp = {};
2648 
2649 		r = -EINVAL;
2650 		if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
2651 			break;
2652 
2653 		r = -EFAULT;
2654 		if (copy_from_user(&unp, argp, sizeof(unp)))
2655 			break;
2656 
2657 		r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
2658 				       &cmd->rc, &cmd->rrc);
2659 		break;
2660 	}
2661 	case KVM_PV_VERIFY: {
2662 		r = -EINVAL;
2663 		if (!kvm_s390_pv_is_protected(kvm))
2664 			break;
2665 
2666 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2667 				  UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
2668 		KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
2669 			     cmd->rrc);
2670 		break;
2671 	}
2672 	case KVM_PV_PREP_RESET: {
2673 		r = -EINVAL;
2674 		if (!kvm_s390_pv_is_protected(kvm))
2675 			break;
2676 
2677 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2678 				  UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
2679 		KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
2680 			     cmd->rc, cmd->rrc);
2681 		break;
2682 	}
2683 	case KVM_PV_UNSHARE_ALL: {
2684 		r = -EINVAL;
2685 		if (!kvm_s390_pv_is_protected(kvm))
2686 			break;
2687 
2688 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2689 				  UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
2690 		KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
2691 			     cmd->rc, cmd->rrc);
2692 		break;
2693 	}
2694 	case KVM_PV_INFO: {
2695 		struct kvm_s390_pv_info info = {};
2696 		ssize_t data_len;
2697 
2698 		/*
2699 		 * No need to check the VM protection here.
2700 		 *
2701 		 * Maybe user space wants to query some of the data
2702 		 * when the VM is still unprotected. If we see the
2703 		 * need to fence a new data command we can still
2704 		 * return an error in the info handler.
2705 		 */
2706 
2707 		r = -EFAULT;
2708 		if (copy_from_user(&info, argp, sizeof(info.header)))
2709 			break;
2710 
2711 		r = -EINVAL;
2712 		if (info.header.len_max < sizeof(info.header))
2713 			break;
2714 
2715 		data_len = kvm_s390_handle_pv_info(&info);
2716 		if (data_len < 0) {
2717 			r = data_len;
2718 			break;
2719 		}
2720 		/*
2721 		 * If a data command struct is extended (multiple
2722 		 * times) this can be used to determine how much of it
2723 		 * is valid.
2724 		 */
2725 		info.header.len_written = data_len;
2726 
2727 		r = -EFAULT;
2728 		if (copy_to_user(argp, &info, data_len))
2729 			break;
2730 
2731 		r = 0;
2732 		break;
2733 	}
2734 	case KVM_PV_DUMP: {
2735 		struct kvm_s390_pv_dmp dmp;
2736 
2737 		r = -EINVAL;
2738 		if (!kvm_s390_pv_is_protected(kvm))
2739 			break;
2740 
2741 		r = -EFAULT;
2742 		if (copy_from_user(&dmp, argp, sizeof(dmp)))
2743 			break;
2744 
2745 		r = kvm_s390_pv_dmp(kvm, cmd, dmp);
2746 		if (r)
2747 			break;
2748 
2749 		if (copy_to_user(argp, &dmp, sizeof(dmp))) {
2750 			r = -EFAULT;
2751 			break;
2752 		}
2753 
2754 		break;
2755 	}
2756 	default:
2757 		r = -ENOTTY;
2758 	}
2759 	if (need_lock)
2760 		mutex_unlock(&kvm->lock);
2761 
2762 	return r;
2763 }
2764 
2765 static int mem_op_validate_common(struct kvm_s390_mem_op *mop, u64 supported_flags)
2766 {
2767 	if (mop->flags & ~supported_flags || !mop->size)
2768 		return -EINVAL;
2769 	if (mop->size > MEM_OP_MAX_SIZE)
2770 		return -E2BIG;
2771 	if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) {
2772 		if (mop->key > 0xf)
2773 			return -EINVAL;
2774 	} else {
2775 		mop->key = 0;
2776 	}
2777 	return 0;
2778 }
2779 
2780 static int kvm_s390_vm_mem_op_abs(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2781 {
2782 	void __user *uaddr = (void __user *)mop->buf;
2783 	enum gacc_mode acc_mode;
2784 	void *tmpbuf = NULL;
2785 	int r, srcu_idx;
2786 
2787 	r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION |
2788 					KVM_S390_MEMOP_F_CHECK_ONLY);
2789 	if (r)
2790 		return r;
2791 
2792 	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2793 		tmpbuf = vmalloc(mop->size);
2794 		if (!tmpbuf)
2795 			return -ENOMEM;
2796 	}
2797 
2798 	srcu_idx = srcu_read_lock(&kvm->srcu);
2799 
2800 	if (kvm_is_error_gpa(kvm, mop->gaddr)) {
2801 		r = PGM_ADDRESSING;
2802 		goto out_unlock;
2803 	}
2804 
2805 	acc_mode = mop->op == KVM_S390_MEMOP_ABSOLUTE_READ ? GACC_FETCH : GACC_STORE;
2806 	if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2807 		r = check_gpa_range(kvm, mop->gaddr, mop->size, acc_mode, mop->key);
2808 		goto out_unlock;
2809 	}
2810 	if (acc_mode == GACC_FETCH) {
2811 		r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
2812 					      mop->size, GACC_FETCH, mop->key);
2813 		if (r)
2814 			goto out_unlock;
2815 		if (copy_to_user(uaddr, tmpbuf, mop->size))
2816 			r = -EFAULT;
2817 	} else {
2818 		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2819 			r = -EFAULT;
2820 			goto out_unlock;
2821 		}
2822 		r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
2823 					      mop->size, GACC_STORE, mop->key);
2824 	}
2825 
2826 out_unlock:
2827 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2828 
2829 	vfree(tmpbuf);
2830 	return r;
2831 }
2832 
2833 static int kvm_s390_vm_mem_op_cmpxchg(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2834 {
2835 	void __user *uaddr = (void __user *)mop->buf;
2836 	void __user *old_addr = (void __user *)mop->old_addr;
2837 	union {
2838 		__uint128_t quad;
2839 		char raw[sizeof(__uint128_t)];
2840 	} old = { .quad = 0}, new = { .quad = 0 };
2841 	unsigned int off_in_quad = sizeof(new) - mop->size;
2842 	int r, srcu_idx;
2843 	bool success;
2844 
2845 	r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION);
2846 	if (r)
2847 		return r;
2848 	/*
2849 	 * This validates off_in_quad. Checking that size is a power
2850 	 * of two is not necessary, as cmpxchg_guest_abs_with_key
2851 	 * takes care of that.
2852 	 */
2853 	if (mop->size > sizeof(new))
2854 		return -EINVAL;
2855 	if (copy_from_user(&new.raw[off_in_quad], uaddr, mop->size))
2856 		return -EFAULT;
2857 	if (copy_from_user(&old.raw[off_in_quad], old_addr, mop->size))
2858 		return -EFAULT;
2859 
2860 	srcu_idx = srcu_read_lock(&kvm->srcu);
2861 
2862 	if (kvm_is_error_gpa(kvm, mop->gaddr)) {
2863 		r = PGM_ADDRESSING;
2864 		goto out_unlock;
2865 	}
2866 
2867 	r = cmpxchg_guest_abs_with_key(kvm, mop->gaddr, mop->size, &old.quad,
2868 				       new.quad, mop->key, &success);
2869 	if (!success && copy_to_user(old_addr, &old.raw[off_in_quad], mop->size))
2870 		r = -EFAULT;
2871 
2872 out_unlock:
2873 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2874 	return r;
2875 }
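
/*
 * Worked example for off_in_quad above (editorial note): for a 4-byte
 * cmpxchg, off_in_quad = 16 - 4 = 12, so the old and new values occupy
 * raw[12..15]. On big-endian s390 these are the least significant bytes
 * of the 16-byte quad, i.e. a narrower operand ends up right-aligned in
 * the value handed to cmpxchg_guest_abs_with_key().
 */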
2876 
2877 static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2878 {
2879 	/*
2880 	 * This is technically a heuristic only: if the kvm->lock is not
2881 	 * taken, it is not guaranteed that the vm is/remains non-protected.
2882 	 * This is ok from a kernel perspective, wrongdoing is detected
2883 	 * on the access, -EFAULT is returned and the vm may crash the
2884 	 * next time it accesses the memory in question.
2885 	 * There is no sane use case for doing the switch and a memop on two
2886 	 * different CPUs at the same time.
2887 	 */
2888 	if (kvm_s390_pv_get_handle(kvm))
2889 		return -EINVAL;
2890 
2891 	switch (mop->op) {
2892 	case KVM_S390_MEMOP_ABSOLUTE_READ:
2893 	case KVM_S390_MEMOP_ABSOLUTE_WRITE:
2894 		return kvm_s390_vm_mem_op_abs(kvm, mop);
2895 	case KVM_S390_MEMOP_ABSOLUTE_CMPXCHG:
2896 		return kvm_s390_vm_mem_op_cmpxchg(kvm, mop);
2897 	default:
2898 		return -EINVAL;
2899 	}
2900 }
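
/*
 * Editorial example (not part of this file): an absolute read through the
 * VM memop dispatcher above. A minimal sketch, assuming vm_fd is an open
 * VM fd:
 *
 *	uint8_t buf[256];
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = 0x10000,
 *		.size = sizeof(buf),
 *		.op = KVM_S390_MEMOP_ABSOLUTE_READ,
 *		.buf = (__u64)(unsigned long)buf,
 *	};
 *
 *	int r = ioctl(vm_fd, KVM_S390_MEM_OP, &op);
 *
 * A positive return value is a program interruption code such as
 * PGM_ADDRESSING; negative values are ordinary errnos.
 */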
2901 
2902 int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
2903 {
2904 	struct kvm *kvm = filp->private_data;
2905 	void __user *argp = (void __user *)arg;
2906 	struct kvm_device_attr attr;
2907 	int r;
2908 
2909 	switch (ioctl) {
2910 	case KVM_S390_INTERRUPT: {
2911 		struct kvm_s390_interrupt s390int;
2912 
2913 		r = -EFAULT;
2914 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
2915 			break;
2916 		r = kvm_s390_inject_vm(kvm, &s390int);
2917 		break;
2918 	}
2919 	case KVM_CREATE_IRQCHIP: {
2920 		struct kvm_irq_routing_entry routing;
2921 
2922 		r = -EINVAL;
2923 		if (kvm->arch.use_irqchip) {
2924 			/* Set up dummy routing. */
2925 			memset(&routing, 0, sizeof(routing));
2926 			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
2927 		}
2928 		break;
2929 	}
2930 	case KVM_SET_DEVICE_ATTR: {
2931 		r = -EFAULT;
2932 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2933 			break;
2934 		r = kvm_s390_vm_set_attr(kvm, &attr);
2935 		break;
2936 	}
2937 	case KVM_GET_DEVICE_ATTR: {
2938 		r = -EFAULT;
2939 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2940 			break;
2941 		r = kvm_s390_vm_get_attr(kvm, &attr);
2942 		break;
2943 	}
2944 	case KVM_HAS_DEVICE_ATTR: {
2945 		r = -EFAULT;
2946 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2947 			break;
2948 		r = kvm_s390_vm_has_attr(kvm, &attr);
2949 		break;
2950 	}
2951 	case KVM_S390_GET_SKEYS: {
2952 		struct kvm_s390_skeys args;
2953 
2954 		r = -EFAULT;
2955 		if (copy_from_user(&args, argp,
2956 				   sizeof(struct kvm_s390_skeys)))
2957 			break;
2958 		r = kvm_s390_get_skeys(kvm, &args);
2959 		break;
2960 	}
2961 	case KVM_S390_SET_SKEYS: {
2962 		struct kvm_s390_skeys args;
2963 
2964 		r = -EFAULT;
2965 		if (copy_from_user(&args, argp,
2966 				   sizeof(struct kvm_s390_skeys)))
2967 			break;
2968 		r = kvm_s390_set_skeys(kvm, &args);
2969 		break;
2970 	}
2971 	case KVM_S390_GET_CMMA_BITS: {
2972 		struct kvm_s390_cmma_log args;
2973 
2974 		r = -EFAULT;
2975 		if (copy_from_user(&args, argp, sizeof(args)))
2976 			break;
2977 		mutex_lock(&kvm->slots_lock);
2978 		r = kvm_s390_get_cmma_bits(kvm, &args);
2979 		mutex_unlock(&kvm->slots_lock);
2980 		if (!r) {
2981 			r = copy_to_user(argp, &args, sizeof(args));
2982 			if (r)
2983 				r = -EFAULT;
2984 		}
2985 		break;
2986 	}
2987 	case KVM_S390_SET_CMMA_BITS: {
2988 		struct kvm_s390_cmma_log args;
2989 
2990 		r = -EFAULT;
2991 		if (copy_from_user(&args, argp, sizeof(args)))
2992 			break;
2993 		mutex_lock(&kvm->slots_lock);
2994 		r = kvm_s390_set_cmma_bits(kvm, &args);
2995 		mutex_unlock(&kvm->slots_lock);
2996 		break;
2997 	}
2998 	case KVM_S390_PV_COMMAND: {
2999 		struct kvm_pv_cmd args;
3000 
3001 		/* protvirt means user cpu state */
3002 		kvm_s390_set_user_cpu_state_ctrl(kvm);
3003 		r = 0;
3004 		if (!is_prot_virt_host()) {
3005 			r = -EINVAL;
3006 			break;
3007 		}
3008 		if (copy_from_user(&args, argp, sizeof(args))) {
3009 			r = -EFAULT;
3010 			break;
3011 		}
3012 		if (args.flags) {
3013 			r = -EINVAL;
3014 			break;
3015 		}
3016 		/* must be called without kvm->lock */
3017 		r = kvm_s390_handle_pv(kvm, &args);
3018 		if (copy_to_user(argp, &args, sizeof(args))) {
3019 			r = -EFAULT;
3020 			break;
3021 		}
3022 		break;
3023 	}
3024 	case KVM_S390_MEM_OP: {
3025 		struct kvm_s390_mem_op mem_op;
3026 
3027 		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
3028 			r = kvm_s390_vm_mem_op(kvm, &mem_op);
3029 		else
3030 			r = -EFAULT;
3031 		break;
3032 	}
3033 	case KVM_S390_ZPCI_OP: {
3034 		struct kvm_s390_zpci_op args;
3035 
3036 		r = -EINVAL;
3037 		if (!IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
3038 			break;
3039 		if (copy_from_user(&args, argp, sizeof(args))) {
3040 			r = -EFAULT;
3041 			break;
3042 		}
3043 		r = kvm_s390_pci_zpci_op(kvm, &args);
3044 		break;
3045 	}
3046 	default:
3047 		r = -ENOTTY;
3048 	}
3049 
3050 	return r;
3051 }
3052 
3053 static int kvm_s390_apxa_installed(void)
3054 {
3055 	struct ap_config_info info;
3056 
3057 	if (ap_instructions_available()) {
3058 		if (ap_qci(&info) == 0)
3059 			return info.apxa;
3060 	}
3061 
3062 	return 0;
3063 }
3064 
3065 /*
3066  * The format of the crypto control block (CRYCB) is specified in the 3 low
3067  * order bits of the CRYCB designation (CRYCBD) field as follows:
3068  * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
3069  *	     AP extended addressing (APXA) facility is installed.
3070  * Format 1: The APXA facility is not installed but the MSAX3 facility is.
3071  * Format 2: Both the APXA and MSAX3 facilities are installed.
3072  */
3073 static void kvm_s390_set_crycb_format(struct kvm *kvm)
3074 {
3075 	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
3076 
3077 	/* Clear the CRYCB format bits - i.e., set format 0 by default */
3078 	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
3079 
3080 	/* Check whether MSAX3 is installed */
3081 	if (!test_kvm_facility(kvm, 76))
3082 		return;
3083 
3084 	if (kvm_s390_apxa_installed())
3085 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
3086 	else
3087 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
3088 }
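
/*
 * Worked example (editorial note, with a hypothetical address): with the
 * CRYCB at 0x12345000 and both MSAX3 and APXA installed, crycbd becomes
 * 0x12345000 | CRYCB_FORMAT2. Consumers can split the field again with
 *
 *	format = crycbd & CRYCB_FORMAT_MASK;
 *	origin = crycbd & ~CRYCB_FORMAT_MASK;
 *
 * which works because the CRYCB is aligned such that the format bits of
 * its address are always zero.
 */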
3089 
3090 /*
3091  * kvm_arch_crypto_set_masks
3092  *
3093  * @kvm: pointer to the target guest's KVM struct containing the crypto masks
3094  *	 to be set.
3095  * @apm: the mask identifying the accessible AP adapters
3096  * @aqm: the mask identifying the accessible AP domains
3097  * @adm: the mask identifying the accessible AP control domains
3098  *
3099  * Set the masks that identify the adapters, domains and control domains to
3100  * which the KVM guest is granted access.
3101  *
3102  * Note: The kvm->lock mutex must be locked by the caller before invoking this
3103  *	 function.
3104  */
3105 void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
3106 			       unsigned long *aqm, unsigned long *adm)
3107 {
3108 	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
3109 
3110 	kvm_s390_vcpu_block_all(kvm);
3111 
3112 	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
3113 	case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
3114 		memcpy(crycb->apcb1.apm, apm, 32);
3115 		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
3116 			 apm[0], apm[1], apm[2], apm[3]);
3117 		memcpy(crycb->apcb1.aqm, aqm, 32);
3118 		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
3119 			 aqm[0], aqm[1], aqm[2], aqm[3]);
3120 		memcpy(crycb->apcb1.adm, adm, 32);
3121 		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
3122 			 adm[0], adm[1], adm[2], adm[3]);
3123 		break;
3124 	case CRYCB_FORMAT1:
3125 	case CRYCB_FORMAT0: /* Fall through - both use APCB0 */
3126 		memcpy(crycb->apcb0.apm, apm, 8);
3127 		memcpy(crycb->apcb0.aqm, aqm, 2);
3128 		memcpy(crycb->apcb0.adm, adm, 2);
3129 		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
3130 			 apm[0], *((unsigned short *)aqm),
3131 			 *((unsigned short *)adm));
3132 		break;
3133 	default:	/* Cannot happen */
3134 		break;
3135 	}
3136 
3137 	/* recreate the shadow crycb for each vcpu */
3138 	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
3139 	kvm_s390_vcpu_unblock_all(kvm);
3140 }
3141 EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
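
/*
 * Editorial example (not part of this file): a hypothetical consumer
 * granting a guest AP adapter apid and usage/control domain apqi. The
 * APCB masks use the machine's left-to-right bit numbering, hence
 * set_bit_inv(), and kvm->lock must be held as the note above requires:
 *
 *	DECLARE_BITMAP(apm, 256) = { 0 };
 *	DECLARE_BITMAP(aqm, 256) = { 0 };
 *	DECLARE_BITMAP(adm, 256) = { 0 };
 *
 *	set_bit_inv(apid, apm);
 *	set_bit_inv(apqi, aqm);
 *	set_bit_inv(apqi, adm);
 *
 *	mutex_lock(&kvm->lock);
 *	kvm_arch_crypto_set_masks(kvm, apm, aqm, adm);
 *	mutex_unlock(&kvm->lock);
 */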
3142 
3143 /*
3144  * kvm_arch_crypto_clear_masks
3145  *
3146  * @kvm: pointer to the target guest's KVM struct containing the crypto masks
3147  *	 to be cleared.
3148  *
3149  * Clear the masks that identify the adapters, domains and control domains to
3150  * which the KVM guest is granted access.
3151  *
3152  * Note: The kvm->lock mutex must be locked by the caller before invoking this
3153  *	 function.
3154  */
3155 void kvm_arch_crypto_clear_masks(struct kvm *kvm)
3156 {
3157 	kvm_s390_vcpu_block_all(kvm);
3158 
3159 	memset(&kvm->arch.crypto.crycb->apcb0, 0,
3160 	       sizeof(kvm->arch.crypto.crycb->apcb0));
3161 	memset(&kvm->arch.crypto.crycb->apcb1, 0,
3162 	       sizeof(kvm->arch.crypto.crycb->apcb1));
3163 
3164 	VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
3165 	/* recreate the shadow crycb for each vcpu */
3166 	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
3167 	kvm_s390_vcpu_unblock_all(kvm);
3168 }
3169 EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
3170 
3171 static u64 kvm_s390_get_initial_cpuid(void)
3172 {
3173 	struct cpuid cpuid;
3174 
3175 	get_cpu_id(&cpuid);
3176 	cpuid.version = 0xff;
3177 	return *((u64 *) &cpuid);
3178 }
3179 
3180 static void kvm_s390_crypto_init(struct kvm *kvm)
3181 {
3182 	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
3183 	kvm_s390_set_crycb_format(kvm);
3184 	init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem);
3185 
3186 	if (!test_kvm_facility(kvm, 76))
3187 		return;
3188 
3189 	/* Enable AES/DEA protected key functions by default */
3190 	kvm->arch.crypto.aes_kw = 1;
3191 	kvm->arch.crypto.dea_kw = 1;
3192 	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
3193 			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
3194 	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
3195 			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
3196 }
3197 
3198 static void sca_dispose(struct kvm *kvm)
3199 {
3200 	if (kvm->arch.use_esca)
3201 		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
3202 	else
3203 		free_page((unsigned long)(kvm->arch.sca));
3204 	kvm->arch.sca = NULL;
3205 }
3206 
3207 void kvm_arch_free_vm(struct kvm *kvm)
3208 {
3209 	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
3210 		kvm_s390_pci_clear_list(kvm);
3211 
3212 	__kvm_arch_free_vm(kvm);
3213 }
3214 
3215 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
3216 {
3217 	gfp_t alloc_flags = GFP_KERNEL_ACCOUNT;
3218 	int i, rc;
3219 	char debug_name[16];
3220 	static unsigned long sca_offset;
3221 
3222 	rc = -EINVAL;
3223 #ifdef CONFIG_KVM_S390_UCONTROL
3224 	if (type & ~KVM_VM_S390_UCONTROL)
3225 		goto out_err;
3226 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
3227 		goto out_err;
3228 #else
3229 	if (type)
3230 		goto out_err;
3231 #endif
3232 
3233 	rc = s390_enable_sie();
3234 	if (rc)
3235 		goto out_err;
3236 
3237 	rc = -ENOMEM;
3238 
3239 	if (!sclp.has_64bscao)
3240 		alloc_flags |= GFP_DMA;
3241 	rwlock_init(&kvm->arch.sca_lock);
3242 	/* start with basic SCA */
3243 	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
3244 	if (!kvm->arch.sca)
3245 		goto out_err;
3246 	mutex_lock(&kvm_lock);
3247 	sca_offset += 16;
3248 	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
3249 		sca_offset = 0;
3250 	kvm->arch.sca = (struct bsca_block *)
3251 			((char *) kvm->arch.sca + sca_offset);
3252 	mutex_unlock(&kvm_lock);
3253 
3254 	sprintf(debug_name, "kvm-%u", current->pid);
3255 
3256 	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
3257 	if (!kvm->arch.dbf)
3258 		goto out_err;
3259 
3260 	BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
3261 	kvm->arch.sie_page2 =
3262 	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
3263 	if (!kvm->arch.sie_page2)
3264 		goto out_err;
3265 
3266 	kvm->arch.sie_page2->kvm = kvm;
3267 	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
3268 
3269 	for (i = 0; i < kvm_s390_fac_size(); i++) {
3270 		kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
3271 					      (kvm_s390_fac_base[i] |
3272 					       kvm_s390_fac_ext[i]);
3273 		kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
3274 					      kvm_s390_fac_base[i];
3275 	}
3276 	kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
3277 
3278 	/* we are always in czam mode - even on pre z14 machines */
3279 	set_kvm_facility(kvm->arch.model.fac_mask, 138);
3280 	set_kvm_facility(kvm->arch.model.fac_list, 138);
3281 	/* we emulate STHYI in kvm */
3282 	set_kvm_facility(kvm->arch.model.fac_mask, 74);
3283 	set_kvm_facility(kvm->arch.model.fac_list, 74);
3284 	if (MACHINE_HAS_TLB_GUEST) {
3285 		set_kvm_facility(kvm->arch.model.fac_mask, 147);
3286 		set_kvm_facility(kvm->arch.model.fac_list, 147);
3287 	}
3288 
3289 	if (css_general_characteristics.aiv && test_facility(65))
3290 		set_kvm_facility(kvm->arch.model.fac_mask, 65);
3291 
3292 	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
3293 	kvm->arch.model.ibc = sclp.ibc & 0x0fff;
3294 
3295 	kvm_s390_crypto_init(kvm);
3296 
3297 	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
3298 		mutex_lock(&kvm->lock);
3299 		kvm_s390_pci_init_list(kvm);
3300 		kvm_s390_vcpu_pci_enable_interp(kvm);
3301 		mutex_unlock(&kvm->lock);
3302 	}
3303 
3304 	mutex_init(&kvm->arch.float_int.ais_lock);
3305 	spin_lock_init(&kvm->arch.float_int.lock);
3306 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
3307 		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
3308 	init_waitqueue_head(&kvm->arch.ipte_wq);
3309 	mutex_init(&kvm->arch.ipte_mutex);
3310 
3311 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
3312 	VM_EVENT(kvm, 3, "vm created with type %lu", type);
3313 
3314 	if (type & KVM_VM_S390_UCONTROL) {
3315 		kvm->arch.gmap = NULL;
3316 		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
3317 	} else {
3318 		if (sclp.hamax == U64_MAX)
3319 			kvm->arch.mem_limit = TASK_SIZE_MAX;
3320 		else
3321 			kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
3322 						    sclp.hamax + 1);
3323 		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
3324 		if (!kvm->arch.gmap)
3325 			goto out_err;
3326 		kvm->arch.gmap->private = kvm;
3327 		kvm->arch.gmap->pfault_enabled = 0;
3328 	}
3329 
3330 	kvm->arch.use_pfmfi = sclp.has_pfmfi;
3331 	kvm->arch.use_skf = sclp.has_skey;
3332 	spin_lock_init(&kvm->arch.start_stop_lock);
3333 	kvm_s390_vsie_init(kvm);
3334 	if (use_gisa)
3335 		kvm_s390_gisa_init(kvm);
3336 	INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup);
3337 	kvm->arch.pv.set_aside = NULL;
3338 	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
3339 
3340 	return 0;
3341 out_err:
3342 	free_page((unsigned long)kvm->arch.sie_page2);
3343 	debug_unregister(kvm->arch.dbf);
3344 	sca_dispose(kvm);
3345 	KVM_EVENT(3, "creation of vm failed: %d", rc);
3346 	return rc;
3347 }
3348 
3349 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
3350 {
3351 	u16 rc, rrc;
3352 
3353 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
3354 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
3355 	kvm_s390_clear_local_irqs(vcpu);
3356 	kvm_clear_async_pf_completion_queue(vcpu);
3357 	if (!kvm_is_ucontrol(vcpu->kvm))
3358 		sca_del_vcpu(vcpu);
3359 	kvm_s390_update_topology_change_report(vcpu->kvm, 1);
3360 
3361 	if (kvm_is_ucontrol(vcpu->kvm))
3362 		gmap_remove(vcpu->arch.gmap);
3363 
3364 	if (vcpu->kvm->arch.use_cmma)
3365 		kvm_s390_vcpu_unsetup_cmma(vcpu);
3366 	/* We cannot hold the vcpu mutex here, we are already dying */
3367 	if (kvm_s390_pv_cpu_get_handle(vcpu))
3368 		kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
3369 	free_page((unsigned long)(vcpu->arch.sie_block));
3370 }
3371 
3372 void kvm_arch_destroy_vm(struct kvm *kvm)
3373 {
3374 	u16 rc, rrc;
3375 
3376 	kvm_destroy_vcpus(kvm);
3377 	sca_dispose(kvm);
3378 	kvm_s390_gisa_destroy(kvm);
3379 	/*
3380 	 * We are already at the end of life and kvm->lock is not taken.
3381 	 * This is ok as the file descriptor is closed by now and nobody
3382 	 * can mess with the pv state.
3383 	 */
3384 	kvm_s390_pv_deinit_cleanup_all(kvm, &rc, &rrc);
3385 	/*
3386 	 * Remove the mmu notifier only when the whole KVM VM is torn down,
3387 	 * and only if one was registered to begin with. If the VM is
3388 	 * currently not protected, but has been previously been protected,
3389 	 * currently not protected, but has previously been protected,
3390 	 */
3391 	if (kvm->arch.pv.mmu_notifier.ops)
3392 		mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm);
3393 
3394 	debug_unregister(kvm->arch.dbf);
3395 	free_page((unsigned long)kvm->arch.sie_page2);
3396 	if (!kvm_is_ucontrol(kvm))
3397 		gmap_remove(kvm->arch.gmap);
3398 	kvm_s390_destroy_adapters(kvm);
3399 	kvm_s390_clear_float_irqs(kvm);
3400 	kvm_s390_vsie_destroy(kvm);
3401 	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
3402 }
3403 
3404 /* Section: vcpu related */
3405 static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
3406 {
3407 	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
3408 	if (!vcpu->arch.gmap)
3409 		return -ENOMEM;
3410 	vcpu->arch.gmap->private = vcpu->kvm;
3411 
3412 	return 0;
3413 }
3414 
3415 static void sca_del_vcpu(struct kvm_vcpu *vcpu)
3416 {
3417 	if (!kvm_s390_use_sca_entries())
3418 		return;
3419 	read_lock(&vcpu->kvm->arch.sca_lock);
3420 	if (vcpu->kvm->arch.use_esca) {
3421 		struct esca_block *sca = vcpu->kvm->arch.sca;
3422 
3423 		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
3424 		sca->cpu[vcpu->vcpu_id].sda = 0;
3425 	} else {
3426 		struct bsca_block *sca = vcpu->kvm->arch.sca;
3427 
3428 		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
3429 		sca->cpu[vcpu->vcpu_id].sda = 0;
3430 	}
3431 	read_unlock(&vcpu->kvm->arch.sca_lock);
3432 }
3433 
3434 static void sca_add_vcpu(struct kvm_vcpu *vcpu)
3435 {
3436 	if (!kvm_s390_use_sca_entries()) {
3437 		phys_addr_t sca_phys = virt_to_phys(vcpu->kvm->arch.sca);
3438 
3439 		/* we still need the basic sca for the ipte control */
3440 		vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3441 		vcpu->arch.sie_block->scaol = sca_phys;
3442 		return;
3443 	}
3444 	read_lock(&vcpu->kvm->arch.sca_lock);
3445 	if (vcpu->kvm->arch.use_esca) {
3446 		struct esca_block *sca = vcpu->kvm->arch.sca;
3447 		phys_addr_t sca_phys = virt_to_phys(sca);
3448 
3449 		sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
3450 		vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3451 		vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK;
3452 		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
3453 		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
3454 	} else {
3455 		struct bsca_block *sca = vcpu->kvm->arch.sca;
3456 		phys_addr_t sca_phys = virt_to_phys(sca);
3457 
3458 		sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
3459 		vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3460 		vcpu->arch.sie_block->scaol = sca_phys;
3461 		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
3462 	}
3463 	read_unlock(&vcpu->kvm->arch.sca_lock);
3464 }
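
/*
 * Note on set_bit_inv()/clear_bit_inv() as used above: the SCA entry mask
 * (mcn) is architected in MSB-0 bit order, so bit 0 is the most significant
 * bit of the word.  Minimal sketch of the mapping, assuming the usual s390
 * definition set_bit_inv(nr, p) == set_bit(nr ^ (BITS_PER_LONG - 1), p):
 *
 *	vcpu_id 0  -> bit 63 of word 0 -> mask 0x8000000000000000UL
 *	vcpu_id 1  -> bit 62 of word 0 -> mask 0x4000000000000000UL
 *	vcpu_id 64 -> bit 63 of word 1 (ESCA only)
 */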
3465 
3466 /* Basic SCA to Extended SCA data copy routines */
3467 static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
3468 {
3469 	d->sda = s->sda;
3470 	d->sigp_ctrl.c = s->sigp_ctrl.c;
3471 	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
3472 }
3473 
3474 static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
3475 {
3476 	int i;
3477 
3478 	d->ipte_control = s->ipte_control;
3479 	d->mcn[0] = s->mcn;
3480 	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
3481 		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
3482 }
3483 
3484 static int sca_switch_to_extended(struct kvm *kvm)
3485 {
3486 	struct bsca_block *old_sca = kvm->arch.sca;
3487 	struct esca_block *new_sca;
3488 	struct kvm_vcpu *vcpu;
3489 	unsigned long vcpu_idx;
3490 	u32 scaol, scaoh;
3491 	phys_addr_t new_sca_phys;
3492 
3493 	if (kvm->arch.use_esca)
3494 		return 0;
3495 
3496 	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL_ACCOUNT | __GFP_ZERO);
3497 	if (!new_sca)
3498 		return -ENOMEM;
3499 
3500 	new_sca_phys = virt_to_phys(new_sca);
3501 	scaoh = new_sca_phys >> 32;
3502 	scaol = new_sca_phys & ESCA_SCAOL_MASK;
3503 
3504 	kvm_s390_vcpu_block_all(kvm);
3505 	write_lock(&kvm->arch.sca_lock);
3506 
3507 	sca_copy_b_to_e(new_sca, old_sca);
3508 
3509 	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
3510 		vcpu->arch.sie_block->scaoh = scaoh;
3511 		vcpu->arch.sie_block->scaol = scaol;
3512 		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
3513 	}
3514 	kvm->arch.sca = new_sca;
3515 	kvm->arch.use_esca = 1;
3516 
3517 	write_unlock(&kvm->arch.sca_lock);
3518 	kvm_s390_vcpu_unblock_all(kvm);
3519 
3520 	free_page((unsigned long)old_sca);
3521 
3522 	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
3523 		 old_sca, kvm->arch.sca);
3524 	return 0;
3525 }
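
/*
 * The switch above is a quiesce-and-swap: every vcpu is fenced out of SIE,
 * the basic SCA is copied into the extended one under the sca_lock writer
 * side (holding off sca_add_vcpu()/sca_del_vcpu()), the new origin is
 * published, and only then do the vcpus resume.  Generic shape of the
 * pattern (illustrative pseudo-code, the helpers are hypothetical):
 *
 *	kvm_s390_vcpu_block_all(kvm);
 *	write_lock(&kvm->arch.sca_lock);
 *	copy_old_to_new();
 *	publish_new_origin_to_all_sie_blocks();
 *	write_unlock(&kvm->arch.sca_lock);
 *	kvm_s390_vcpu_unblock_all(kvm);
 *	free_old();
 *
 * Freeing last is safe: no vcpu can still reference the old SCA.
 */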
3526 
3527 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
3528 {
3529 	int rc;
3530 
3531 	if (!kvm_s390_use_sca_entries()) {
3532 		if (id < KVM_MAX_VCPUS)
3533 			return true;
3534 		return false;
3535 	}
3536 	if (id < KVM_S390_BSCA_CPU_SLOTS)
3537 		return true;
3538 	if (!sclp.has_esca || !sclp.has_64bscao)
3539 		return false;
3540 
3541 	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
3542 
3543 	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
3544 }
3545 
3546 /* needs preemption disabled to protect from TOD sync and vcpu_load/put */
3547 static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3548 {
3549 	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
3550 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3551 	vcpu->arch.cputm_start = get_tod_clock_fast();
3552 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3553 }
3554 
3555 /* needs preemption disabled to protect from TOD sync and vcpu_load/put */
3556 static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3557 {
3558 	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
3559 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3560 	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3561 	vcpu->arch.cputm_start = 0;
3562 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3563 }
3564 
3565 /* needs preemption disabled to protect from TOD sync and vcpu_load/put */
3566 static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3567 {
3568 	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
3569 	vcpu->arch.cputm_enabled = true;
3570 	__start_cpu_timer_accounting(vcpu);
3571 }
3572 
3573 /* needs preemption disabled to protect from TOD sync and vcpu_load/put */
3574 static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3575 {
3576 	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
3577 	__stop_cpu_timer_accounting(vcpu);
3578 	vcpu->arch.cputm_enabled = false;
3579 }
3580 
3581 static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3582 {
3583 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3584 	__enable_cpu_timer_accounting(vcpu);
3585 	preempt_enable();
3586 }
3587 
3588 static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3589 {
3590 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3591 	__disable_cpu_timer_accounting(vcpu);
3592 	preempt_enable();
3593 }
3594 
3595 /* set the cpu timer - may only be called from the VCPU thread itself */
3596 void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
3597 {
3598 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3599 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3600 	if (vcpu->arch.cputm_enabled)
3601 		vcpu->arch.cputm_start = get_tod_clock_fast();
3602 	vcpu->arch.sie_block->cputm = cputm;
3603 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3604 	preempt_enable();
3605 }
3606 
3607 /* update and get the cpu timer - can also be called from other VCPU threads */
3608 __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
3609 {
3610 	unsigned int seq;
3611 	__u64 value;
3612 
3613 	if (unlikely(!vcpu->arch.cputm_enabled))
3614 		return vcpu->arch.sie_block->cputm;
3615 
3616 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3617 	do {
3618 		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
3619 		/*
3620 		 * If the writer would ever execute a read in the critical
3621 		 * section, e.g. in irq context, we have a deadlock.
3622 		 */
3623 		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
3624 		value = vcpu->arch.sie_block->cputm;
3625 		/* if cputm_start is 0, accounting is being started/stopped */
3626 		if (likely(vcpu->arch.cputm_start))
3627 			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3628 	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
3629 	preempt_enable();
3630 	return value;
3631 }
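
/*
 * The retry loop above is the standard seqcount reader pattern; a generic
 * sketch of it (illustrative only, sc and read_shared_state() are
 * hypothetical):
 *
 *	unsigned int seq;
 *	u64 snap;
 *
 *	do {
 *		seq = raw_read_seqcount(&sc);
 *		snap = read_shared_state();
 *	} while (read_seqcount_retry(&sc, seq & ~1));
 *
 * The "& ~1" forces a retry whenever the initial read raced with a writer
 * (odd sequence): the retry compares against the even value, so a torn
 * snapshot is never accepted.
 */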
3632 
3633 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3634 {
3636 	gmap_enable(vcpu->arch.enabled_gmap);
3637 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
3638 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3639 		__start_cpu_timer_accounting(vcpu);
3640 	vcpu->cpu = cpu;
3641 }
3642 
3643 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
3644 {
3645 	vcpu->cpu = -1;
3646 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3647 		__stop_cpu_timer_accounting(vcpu);
3648 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
3649 	vcpu->arch.enabled_gmap = gmap_get_enabled();
3650 	gmap_disable(vcpu->arch.enabled_gmap);
3652 }
3653 
3654 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
3655 {
3656 	mutex_lock(&vcpu->kvm->lock);
3657 	preempt_disable();
3658 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
3659 	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
3660 	preempt_enable();
3661 	mutex_unlock(&vcpu->kvm->lock);
3662 	if (!kvm_is_ucontrol(vcpu->kvm)) {
3663 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
3664 		sca_add_vcpu(vcpu);
3665 	}
3666 	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
3667 		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3668 	/* make vcpu_load load the right gmap on the first trigger */
3669 	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
3670 }
3671 
3672 static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
3673 {
3674 	if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
3675 	    test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
3676 		return true;
3677 	return false;
3678 }
3679 
3680 static bool kvm_has_pckmo_ecc(struct kvm *kvm)
3681 {
3682 	/* At least one ECC subfunction must be present */
3683 	return kvm_has_pckmo_subfunc(kvm, 32) ||
3684 	       kvm_has_pckmo_subfunc(kvm, 33) ||
3685 	       kvm_has_pckmo_subfunc(kvm, 34) ||
3686 	       kvm_has_pckmo_subfunc(kvm, 40) ||
3687 	       kvm_has_pckmo_subfunc(kvm, 41);
3689 }
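
/*
 * The subfunction numbers tested above are PCKMO function codes; going by
 * the CPACF definitions (assumption, see arch/s390/include/asm/cpacf.h),
 * 32-34 are the encrypted ECC-P256/P384/P521 key functions and 40/41 the
 * encrypted Ed25519/Ed448 ones.
 */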
3690 
3691 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
3692 {
3693 	/*
3694 	 * If the AP instructions are not being interpreted and the MSAX3
3695 	 * facility is not configured for the guest, there is nothing to set up.
3696 	 */
3697 	if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
3698 		return;
3699 
3700 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
3701 	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
3702 	vcpu->arch.sie_block->eca &= ~ECA_APIE;
3703 	vcpu->arch.sie_block->ecd &= ~ECD_ECC;
3704 
3705 	if (vcpu->kvm->arch.crypto.apie)
3706 		vcpu->arch.sie_block->eca |= ECA_APIE;
3707 
3708 	/* Set up protected key support */
3709 	if (vcpu->kvm->arch.crypto.aes_kw) {
3710 		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
3711 		/* ECC is also wrapped with the AES key */
3712 		if (kvm_has_pckmo_ecc(vcpu->kvm))
3713 			vcpu->arch.sie_block->ecd |= ECD_ECC;
3714 	}
3715 
3716 	if (vcpu->kvm->arch.crypto.dea_kw)
3717 		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
3718 }
3719 
3720 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
3721 {
3722 	free_page((unsigned long)phys_to_virt(vcpu->arch.sie_block->cbrlo));
3723 	vcpu->arch.sie_block->cbrlo = 0;
3724 }
3725 
3726 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
3727 {
3728 	void *cbrlo_page = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3729 
3730 	if (!cbrlo_page)
3731 		return -ENOMEM;
3732 
3733 	vcpu->arch.sie_block->cbrlo = virt_to_phys(cbrlo_page);
3734 	return 0;
3735 }
3736 
3737 static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
3738 {
3739 	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
3740 
3741 	vcpu->arch.sie_block->ibc = model->ibc;
3742 	if (test_kvm_facility(vcpu->kvm, 7))
3743 		vcpu->arch.sie_block->fac = virt_to_phys(model->fac_list);
3744 }
3745 
3746 static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
3747 {
3748 	int rc = 0;
3749 	u16 uvrc, uvrrc;
3750 
3751 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
3752 						    CPUSTAT_SM |
3753 						    CPUSTAT_STOPPED);
3754 
3755 	if (test_kvm_facility(vcpu->kvm, 78))
3756 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
3757 	else if (test_kvm_facility(vcpu->kvm, 8))
3758 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
3759 
3760 	kvm_s390_vcpu_setup_model(vcpu);
3761 
3762 	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
3763 	if (MACHINE_HAS_ESOP)
3764 		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
3765 	if (test_kvm_facility(vcpu->kvm, 9))
3766 		vcpu->arch.sie_block->ecb |= ECB_SRSI;
3767 	if (test_kvm_facility(vcpu->kvm, 11))
3768 		vcpu->arch.sie_block->ecb |= ECB_PTF;
3769 	if (test_kvm_facility(vcpu->kvm, 73))
3770 		vcpu->arch.sie_block->ecb |= ECB_TE;
3771 	if (!kvm_is_ucontrol(vcpu->kvm))
3772 		vcpu->arch.sie_block->ecb |= ECB_SPECI;
3773 
3774 	if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
3775 		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
3776 	if (test_kvm_facility(vcpu->kvm, 130))
3777 		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
3778 	vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
3779 	if (sclp.has_cei)
3780 		vcpu->arch.sie_block->eca |= ECA_CEI;
3781 	if (sclp.has_ib)
3782 		vcpu->arch.sie_block->eca |= ECA_IB;
3783 	if (sclp.has_siif)
3784 		vcpu->arch.sie_block->eca |= ECA_SII;
3785 	if (sclp.has_sigpif)
3786 		vcpu->arch.sie_block->eca |= ECA_SIGPI;
3787 	if (test_kvm_facility(vcpu->kvm, 129)) {
3788 		vcpu->arch.sie_block->eca |= ECA_VX;
3789 		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3790 	}
3791 	if (test_kvm_facility(vcpu->kvm, 139))
3792 		vcpu->arch.sie_block->ecd |= ECD_MEF;
3793 	if (test_kvm_facility(vcpu->kvm, 156))
3794 		vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
3795 	if (vcpu->arch.sie_block->gd) {
3796 		vcpu->arch.sie_block->eca |= ECA_AIV;
3797 		VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3798 			   vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3799 	}
3800 	vcpu->arch.sie_block->sdnxo = virt_to_phys(&vcpu->run->s.regs.sdnx) | SDNXC;
3801 	vcpu->arch.sie_block->riccbd = virt_to_phys(&vcpu->run->s.regs.riccb);
3802 
3803 	if (sclp.has_kss)
3804 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
3805 	else
3806 		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
3807 
3808 	if (vcpu->kvm->arch.use_cmma) {
3809 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
3810 		if (rc)
3811 			return rc;
3812 	}
3813 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3814 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
3815 
3816 	vcpu->arch.sie_block->hpid = HPID_KVM;
3817 
3818 	kvm_s390_vcpu_crypto_setup(vcpu);
3819 
3820 	kvm_s390_vcpu_pci_setup(vcpu);
3821 
3822 	mutex_lock(&vcpu->kvm->lock);
3823 	if (kvm_s390_pv_is_protected(vcpu->kvm)) {
3824 		rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
3825 		if (rc)
3826 			kvm_s390_vcpu_unsetup_cmma(vcpu);
3827 	}
3828 	mutex_unlock(&vcpu->kvm->lock);
3829 
3830 	return rc;
3831 }
3832 
3833 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
3834 {
3835 	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
3836 		return -EINVAL;
3837 	return 0;
3838 }
3839 
3840 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
3841 {
3842 	struct sie_page *sie_page;
3843 	int rc;
3844 
3845 	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
3846 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT);
3847 	if (!sie_page)
3848 		return -ENOMEM;
3849 
3850 	vcpu->arch.sie_block = &sie_page->sie_block;
3851 	vcpu->arch.sie_block->itdba = virt_to_phys(&sie_page->itdb);
3852 
3853 	/* the real guest size will always be smaller than msl */
3854 	vcpu->arch.sie_block->mso = 0;
3855 	vcpu->arch.sie_block->msl = sclp.hamax;
3856 
3857 	vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
3858 	spin_lock_init(&vcpu->arch.local_int.lock);
3859 	vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm);
3860 	seqcount_init(&vcpu->arch.cputm_seqcount);
3861 
3862 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3863 	kvm_clear_async_pf_completion_queue(vcpu);
3864 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
3865 				    KVM_SYNC_GPRS |
3866 				    KVM_SYNC_ACRS |
3867 				    KVM_SYNC_CRS |
3868 				    KVM_SYNC_ARCH0 |
3869 				    KVM_SYNC_PFAULT |
3870 				    KVM_SYNC_DIAG318;
3871 	kvm_s390_set_prefix(vcpu, 0);
3872 	if (test_kvm_facility(vcpu->kvm, 64))
3873 		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
3874 	if (test_kvm_facility(vcpu->kvm, 82))
3875 		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
3876 	if (test_kvm_facility(vcpu->kvm, 133))
3877 		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
3878 	if (test_kvm_facility(vcpu->kvm, 156))
3879 		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
3880 	/* fprs can be synchronized via vrs, even if the guest has no vx. With
3881 	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
3882 	 */
3883 	if (MACHINE_HAS_VX)
3884 		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
3885 	else
3886 		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
3887 
3888 	if (kvm_is_ucontrol(vcpu->kvm)) {
3889 		rc = __kvm_ucontrol_vcpu_init(vcpu);
3890 		if (rc)
3891 			goto out_free_sie_block;
3892 	}
3893 
3894 	VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
3895 		 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3896 	trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3897 
3898 	rc = kvm_s390_vcpu_setup(vcpu);
3899 	if (rc)
3900 		goto out_ucontrol_uninit;
3901 
3902 	kvm_s390_update_topology_change_report(vcpu->kvm, 1);
3903 	return 0;
3904 
3905 out_ucontrol_uninit:
3906 	if (kvm_is_ucontrol(vcpu->kvm))
3907 		gmap_remove(vcpu->arch.gmap);
3908 out_free_sie_block:
3909 	free_page((unsigned long)(vcpu->arch.sie_block));
3910 	return rc;
3911 }
3912 
3913 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3914 {
3915 	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
3916 	return kvm_s390_vcpu_has_irq(vcpu, 0);
3917 }
3918 
3919 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3920 {
3921 	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
3922 }
3923 
3924 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
3925 {
3926 	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
3927 	exit_sie(vcpu);
3928 }
3929 
3930 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
3931 {
3932 	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
3933 }
3934 
3935 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
3936 {
3937 	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
3938 	exit_sie(vcpu);
3939 }
3940 
3941 bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
3942 {
3943 	return atomic_read(&vcpu->arch.sie_block->prog20) &
3944 	       (PROG_BLOCK_SIE | PROG_REQUEST);
3945 }
3946 
3947 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
3948 {
3949 	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
3950 }
3951 
3952 /*
3953  * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
3954  * If the CPU is not running (e.g. waiting as idle) the function will
3955  * return immediately.
 */
3956 void exit_sie(struct kvm_vcpu *vcpu)
3957 {
3958 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
3959 	kvm_s390_vsie_kick(vcpu);
3960 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
3961 		cpu_relax();
3962 }
3963 
3964 /* Kick a guest cpu out of SIE to process a request synchronously */
3965 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
3966 {
3967 	__kvm_make_request(req, vcpu);
3968 	kvm_s390_vcpu_request(vcpu);
3969 }
3970 
3971 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
3972 			      unsigned long end)
3973 {
3974 	struct kvm *kvm = gmap->private;
3975 	struct kvm_vcpu *vcpu;
3976 	unsigned long prefix;
3977 	unsigned long i;
3978 
3979 	if (gmap_is_shadow(gmap))
3980 		return;
3981 	if (start >= 1UL << 31)
3982 		/* We are only interested in prefix pages */
3983 		return;
3984 	kvm_for_each_vcpu(i, vcpu, kvm) {
3985 		/* match against both prefix pages */
3986 		prefix = kvm_s390_get_prefix(vcpu);
3987 		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
3988 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
3989 				   start, end);
3990 			kvm_s390_sync_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
3991 		}
3992 	}
3993 }
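
/*
 * The prefix check above is a closed-interval overlap test against the two
 * prefix pages [prefix, prefix + 2 * PAGE_SIZE - 1].  Worked example with
 * made-up numbers: prefix = 0x10000 and a notification for start = 0x11000,
 * end = 0x11fff touches the second prefix page (0x11000-0x11fff), so that
 * vcpu gets KVM_REQ_REFRESH_GUEST_PREFIX.
 */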
3994 
3995 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
3996 {
3997 	/* do not poll with more than halt_poll_max_steal percent of steal time */
3998 	if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
3999 	    READ_ONCE(halt_poll_max_steal)) {
4000 		vcpu->stat.halt_no_poll_steal++;
4001 		return true;
4002 	}
4003 	return false;
4004 }
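
/*
 * Worked example for the steal-time check above (illustrative, with the
 * usual TICK_USEC of 10000 us): one CPU-timer unit is 1/4096 us, so a tick
 * corresponds to TICK_USEC << 12 = 40960000 timer units.  An avg_steal_timer
 * of 4096000 units (1000 us, i.e. 10% of the tick) gives
 * 4096000 * 100 / 40960000 = 10, which reaches the default
 * halt_poll_max_steal of 10 and suppresses polling.
 */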
4005 
4006 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
4007 {
4008 	/* kvm common code refers to this, but never calls it */
4009 	BUG();
4010 	return 0;
4011 }
4012 
4013 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
4014 					   struct kvm_one_reg *reg)
4015 {
4016 	int r = -EINVAL;
4017 
4018 	switch (reg->id) {
4019 	case KVM_REG_S390_TODPR:
4020 		r = put_user(vcpu->arch.sie_block->todpr,
4021 			     (u32 __user *)reg->addr);
4022 		break;
4023 	case KVM_REG_S390_EPOCHDIFF:
4024 		r = put_user(vcpu->arch.sie_block->epoch,
4025 			     (u64 __user *)reg->addr);
4026 		break;
4027 	case KVM_REG_S390_CPU_TIMER:
4028 		r = put_user(kvm_s390_get_cpu_timer(vcpu),
4029 			     (u64 __user *)reg->addr);
4030 		break;
4031 	case KVM_REG_S390_CLOCK_COMP:
4032 		r = put_user(vcpu->arch.sie_block->ckc,
4033 			     (u64 __user *)reg->addr);
4034 		break;
4035 	case KVM_REG_S390_PFTOKEN:
4036 		r = put_user(vcpu->arch.pfault_token,
4037 			     (u64 __user *)reg->addr);
4038 		break;
4039 	case KVM_REG_S390_PFCOMPARE:
4040 		r = put_user(vcpu->arch.pfault_compare,
4041 			     (u64 __user *)reg->addr);
4042 		break;
4043 	case KVM_REG_S390_PFSELECT:
4044 		r = put_user(vcpu->arch.pfault_select,
4045 			     (u64 __user *)reg->addr);
4046 		break;
4047 	case KVM_REG_S390_PP:
4048 		r = put_user(vcpu->arch.sie_block->pp,
4049 			     (u64 __user *)reg->addr);
4050 		break;
4051 	case KVM_REG_S390_GBEA:
4052 		r = put_user(vcpu->arch.sie_block->gbea,
4053 			     (u64 __user *)reg->addr);
4054 		break;
4055 	default:
4056 		break;
4057 	}
4058 
4059 	return r;
4060 }
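
/*
 * Userspace reaches the handler above through the KVM_GET_ONE_REG vcpu
 * ioctl.  Minimal userspace sketch (vcpu_fd and cputm are hypothetical):
 *
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)(unsigned long)&cputm,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 *
 * reg.addr is a userspace pointer, which is why the handlers use
 * put_user()/get_user() instead of writing into the struct itself.
 */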
4061 
4062 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
4063 					   struct kvm_one_reg *reg)
4064 {
4065 	int r = -EINVAL;
4066 	__u64 val;
4067 
4068 	switch (reg->id) {
4069 	case KVM_REG_S390_TODPR:
4070 		r = get_user(vcpu->arch.sie_block->todpr,
4071 			     (u32 __user *)reg->addr);
4072 		break;
4073 	case KVM_REG_S390_EPOCHDIFF:
4074 		r = get_user(vcpu->arch.sie_block->epoch,
4075 			     (u64 __user *)reg->addr);
4076 		break;
4077 	case KVM_REG_S390_CPU_TIMER:
4078 		r = get_user(val, (u64 __user *)reg->addr);
4079 		if (!r)
4080 			kvm_s390_set_cpu_timer(vcpu, val);
4081 		break;
4082 	case KVM_REG_S390_CLOCK_COMP:
4083 		r = get_user(vcpu->arch.sie_block->ckc,
4084 			     (u64 __user *)reg->addr);
4085 		break;
4086 	case KVM_REG_S390_PFTOKEN:
4087 		r = get_user(vcpu->arch.pfault_token,
4088 			     (u64 __user *)reg->addr);
4089 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4090 			kvm_clear_async_pf_completion_queue(vcpu);
4091 		break;
4092 	case KVM_REG_S390_PFCOMPARE:
4093 		r = get_user(vcpu->arch.pfault_compare,
4094 			     (u64 __user *)reg->addr);
4095 		break;
4096 	case KVM_REG_S390_PFSELECT:
4097 		r = get_user(vcpu->arch.pfault_select,
4098 			     (u64 __user *)reg->addr);
4099 		break;
4100 	case KVM_REG_S390_PP:
4101 		r = get_user(vcpu->arch.sie_block->pp,
4102 			     (u64 __user *)reg->addr);
4103 		break;
4104 	case KVM_REG_S390_GBEA:
4105 		r = get_user(vcpu->arch.sie_block->gbea,
4106 			     (u64 __user *)reg->addr);
4107 		break;
4108 	default:
4109 		break;
4110 	}
4111 
4112 	return r;
4113 }
4114 
4115 static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
4116 {
4117 	vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
4118 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
4119 	memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
4120 
4121 	kvm_clear_async_pf_completion_queue(vcpu);
4122 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
4123 		kvm_s390_vcpu_stop(vcpu);
4124 	kvm_s390_clear_local_irqs(vcpu);
4125 }
4126 
4127 static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
4128 {
4129 	/* Initial reset is a superset of the normal reset */
4130 	kvm_arch_vcpu_ioctl_normal_reset(vcpu);
4131 
4132 	/*
4133 	 * This equals an initial CPU reset in the POP, but we don't switch to ESA.
4134 	 * We reset not only the internal data, but also ...
4135 	 */
4136 	vcpu->arch.sie_block->gpsw.mask = 0;
4137 	vcpu->arch.sie_block->gpsw.addr = 0;
4138 	kvm_s390_set_prefix(vcpu, 0);
4139 	kvm_s390_set_cpu_timer(vcpu, 0);
4140 	vcpu->arch.sie_block->ckc = 0;
4141 	memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
4142 	vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
4143 	vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
4144 
4145 	/* ... the data in sync regs */
4146 	memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
4147 	vcpu->run->s.regs.ckc = 0;
4148 	vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
4149 	vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
4150 	vcpu->run->psw_addr = 0;
4151 	vcpu->run->psw_mask = 0;
4152 	vcpu->run->s.regs.todpr = 0;
4153 	vcpu->run->s.regs.cputm = 0;
4154 	vcpu->run->s.regs.ckc = 0;
4155 	vcpu->run->s.regs.pp = 0;
4156 	vcpu->run->s.regs.gbea = 1;
4157 	vcpu->run->s.regs.fpc = 0;
4158 	/*
4159 	 * Do not reset these registers in the protected case, as some of
4160 	 * them are overlaid and they are not accessible in this case
4161 	 * anyway.
4162 	 */
4163 	if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
4164 		vcpu->arch.sie_block->gbea = 1;
4165 		vcpu->arch.sie_block->pp = 0;
4166 		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4167 		vcpu->arch.sie_block->todpr = 0;
4168 	}
4169 }
4170 
4171 static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
4172 {
4173 	struct kvm_sync_regs *regs = &vcpu->run->s.regs;
4174 
4175 	/* Clear reset is a superset of the initial reset */
4176 	kvm_arch_vcpu_ioctl_initial_reset(vcpu);
4177 
4178 	memset(&regs->gprs, 0, sizeof(regs->gprs));
4179 	memset(&regs->vrs, 0, sizeof(regs->vrs));
4180 	memset(&regs->acrs, 0, sizeof(regs->acrs));
4181 	memset(&regs->gscb, 0, sizeof(regs->gscb));
4182 
4183 	regs->etoken = 0;
4184 	regs->etoken_extension = 0;
4185 }
4186 
4187 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4188 {
4189 	vcpu_load(vcpu);
4190 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
4191 	vcpu_put(vcpu);
4192 	return 0;
4193 }
4194 
4195 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4196 {
4197 	vcpu_load(vcpu);
4198 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
4199 	vcpu_put(vcpu);
4200 	return 0;
4201 }
4202 
4203 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
4204 				  struct kvm_sregs *sregs)
4205 {
4206 	vcpu_load(vcpu);
4207 
4208 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
4209 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
4210 
4211 	vcpu_put(vcpu);
4212 	return 0;
4213 }
4214 
4215 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
4216 				  struct kvm_sregs *sregs)
4217 {
4218 	vcpu_load(vcpu);
4219 
4220 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
4221 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
4222 
4223 	vcpu_put(vcpu);
4224 	return 0;
4225 }
4226 
4227 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4228 {
4229 	int ret = 0;
4230 
4231 	vcpu_load(vcpu);
4232 
4233 	if (test_fp_ctl(fpu->fpc)) {
4234 		ret = -EINVAL;
4235 		goto out;
4236 	}
4237 	vcpu->run->s.regs.fpc = fpu->fpc;
4238 	if (MACHINE_HAS_VX)
4239 		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
4240 				 (freg_t *) fpu->fprs);
4241 	else
4242 		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
4243 
4244 out:
4245 	vcpu_put(vcpu);
4246 	return ret;
4247 }
4248 
4249 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4250 {
4251 	vcpu_load(vcpu);
4252 
4253 	/* make sure we have the latest values */
4254 	save_fpu_regs();
4255 	if (MACHINE_HAS_VX)
4256 		convert_vx_to_fp((freg_t *) fpu->fprs,
4257 				 (__vector128 *) vcpu->run->s.regs.vrs);
4258 	else
4259 		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
4260 	fpu->fpc = vcpu->run->s.regs.fpc;
4261 
4262 	vcpu_put(vcpu);
4263 	return 0;
4264 }
4265 
4266 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
4267 {
4268 	int rc = 0;
4269 
4270 	if (!is_vcpu_stopped(vcpu)) {
4271 		rc = -EBUSY;
4272 	} else {
4273 		vcpu->run->psw_mask = psw.mask;
4274 		vcpu->run->psw_addr = psw.addr;
4275 	}
4276 	return rc;
4277 }
4278 
4279 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
4280 				  struct kvm_translation *tr)
4281 {
4282 	return -EINVAL; /* not implemented yet */
4283 }
4284 
4285 #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
4286 			      KVM_GUESTDBG_USE_HW_BP | \
4287 			      KVM_GUESTDBG_ENABLE)
4288 
4289 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
4290 					struct kvm_guest_debug *dbg)
4291 {
4292 	int rc = 0;
4293 
4294 	vcpu_load(vcpu);
4295 
4296 	vcpu->guest_debug = 0;
4297 	kvm_s390_clear_bp_data(vcpu);
4298 
4299 	if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
4300 		rc = -EINVAL;
4301 		goto out;
4302 	}
4303 	if (!sclp.has_gpere) {
4304 		rc = -EINVAL;
4305 		goto out;
4306 	}
4307 
4308 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
4309 		vcpu->guest_debug = dbg->control;
4310 		/* enforce guest PER */
4311 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
4312 
4313 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
4314 			rc = kvm_s390_import_bp_data(vcpu, dbg);
4315 	} else {
4316 		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
4317 		vcpu->arch.guestdbg.last_bp = 0;
4318 	}
4319 
4320 	if (rc) {
4321 		vcpu->guest_debug = 0;
4322 		kvm_s390_clear_bp_data(vcpu);
4323 		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
4324 	}
4325 
4326 out:
4327 	vcpu_put(vcpu);
4328 	return rc;
4329 }
4330 
4331 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
4332 				    struct kvm_mp_state *mp_state)
4333 {
4334 	int ret;
4335 
4336 	vcpu_load(vcpu);
4337 
4338 	/* CHECK_STOP and LOAD are not supported yet */
4339 	ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
4340 				      KVM_MP_STATE_OPERATING;
4341 
4342 	vcpu_put(vcpu);
4343 	return ret;
4344 }
4345 
4346 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
4347 				    struct kvm_mp_state *mp_state)
4348 {
4349 	int rc = 0;
4350 
4351 	vcpu_load(vcpu);
4352 
4353 	/* user space knows about this interface - let it control the state */
4354 	kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm);
4355 
4356 	switch (mp_state->mp_state) {
4357 	case KVM_MP_STATE_STOPPED:
4358 		rc = kvm_s390_vcpu_stop(vcpu);
4359 		break;
4360 	case KVM_MP_STATE_OPERATING:
4361 		rc = kvm_s390_vcpu_start(vcpu);
4362 		break;
4363 	case KVM_MP_STATE_LOAD:
4364 		if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
4365 			rc = -ENXIO;
4366 			break;
4367 		}
4368 		rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
4369 		break;
4370 	case KVM_MP_STATE_CHECK_STOP:
4371 		fallthrough;	/* CHECK_STOP and LOAD are not supported yet */
4372 	default:
4373 		rc = -ENXIO;
4374 	}
4375 
4376 	vcpu_put(vcpu);
4377 	return rc;
4378 }
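
/*
 * Illustrative userspace counterpart of the handler above (sketch, vcpu_fd
 * is hypothetical):
 *
 *	struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_STOPPED };
 *
 *	ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
 *
 * Note the side effect: the first KVM_SET_MP_STATE switches the VM to
 * user-controlled cpu state, making userspace responsible for starting and
 * stopping vcpus from then on.
 */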
4379 
4380 static bool ibs_enabled(struct kvm_vcpu *vcpu)
4381 {
4382 	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
4383 }
4384 
4385 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
4386 {
4387 retry:
4388 	kvm_s390_vcpu_request_handled(vcpu);
4389 	if (!kvm_request_pending(vcpu))
4390 		return 0;
4391 	/*
4392 	 * If the guest prefix changed, re-arm the ipte notifier for the
4393 	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
4394 	 * This ensures that the ipte instruction for this request has
4395 	 * already finished. We might race against a second unmapper that
4396 	 * wants to set the blocking bit. Let's just retry the request loop.
4397 	 */
4398 	if (kvm_check_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu)) {
4399 		int rc;

4400 		rc = gmap_mprotect_notify(vcpu->arch.gmap,
4401 					  kvm_s390_get_prefix(vcpu),
4402 					  PAGE_SIZE * 2, PROT_WRITE);
4403 		if (rc) {
4404 			kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
4405 			return rc;
4406 		}
4407 		goto retry;
4408 	}
4409 
4410 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
4411 		vcpu->arch.sie_block->ihcpu = 0xffff;
4412 		goto retry;
4413 	}
4414 
4415 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
4416 		if (!ibs_enabled(vcpu)) {
4417 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
4418 			kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
4419 		}
4420 		goto retry;
4421 	}
4422 
4423 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
4424 		if (ibs_enabled(vcpu)) {
4425 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
4426 			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
4427 		}
4428 		goto retry;
4429 	}
4430 
4431 	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
4432 		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
4433 		goto retry;
4434 	}
4435 
4436 	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
4437 		/*
4438 		 * Disable CMM virtualization; we will emulate the ESSA
4439 		 * instruction manually, in order to provide additional
4440 		 * functionality needed for live migration.
4441 		 */
4442 		vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
4443 		goto retry;
4444 	}
4445 
4446 	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
4447 		/*
4448 		 * Re-enable CMM virtualization if CMMA is available and
4449 		 * CMM has been used.
4450 		 */
4451 		if ((vcpu->kvm->arch.use_cmma) &&
4452 		    (vcpu->kvm->mm->context.uses_cmm))
4453 			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
4454 		goto retry;
4455 	}
4456 
4457 	/* we left the vsie handler, nothing to do, just clear the request */
4458 	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
4459 
4460 	return 0;
4461 }
4462 
4463 static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4464 {
4465 	struct kvm_vcpu *vcpu;
4466 	union tod_clock clk;
4467 	unsigned long i;
4468 
4469 	preempt_disable();
4470 
4471 	store_tod_clock_ext(&clk);
4472 
4473 	kvm->arch.epoch = gtod->tod - clk.tod;
4474 	kvm->arch.epdx = 0;
4475 	if (test_kvm_facility(kvm, 139)) {
4476 		kvm->arch.epdx = gtod->epoch_idx - clk.ei;
4477 		if (kvm->arch.epoch > gtod->tod)
4478 			kvm->arch.epdx -= 1;
4479 	}
4480 
4481 	kvm_s390_vcpu_block_all(kvm);
4482 	kvm_for_each_vcpu(i, vcpu, kvm) {
4483 		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
4484 		vcpu->arch.sie_block->epdx  = kvm->arch.epdx;
4485 	}
4486 
4487 	kvm_s390_vcpu_unblock_all(kvm);
4488 	preempt_enable();
4489 }
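
/*
 * The epoch above is a wrapping 64-bit difference: the guest TOD is
 * (host TOD + epoch) mod 2^64.  Worked example with made-up values:
 * clk.tod = 0xfff0 and gtod->tod = 0x0010 give
 * epoch = 0x0010 - 0xfff0 = 0xffffffffffff0020 (wrapped).  That epoch is
 * numerically greater than gtod->tod, so the facility-139 path borrows one
 * from the epoch index (epdx -= 1) to keep the combined epoch consistent.
 */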
4490 
4491 int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4492 {
4493 	if (!mutex_trylock(&kvm->lock))
4494 		return 0;
4495 	__kvm_s390_set_tod_clock(kvm, gtod);
4496 	mutex_unlock(&kvm->lock);
4497 	return 1;
4498 }
4499 
4500 /**
4501  * kvm_arch_fault_in_page - fault-in guest page if necessary
4502  * @vcpu: The corresponding virtual cpu
4503  * @gpa: Guest physical address
4504  * @writable: Whether the page should be writable or not
4505  *
4506  * Make sure that a guest page has been faulted-in on the host.
4507  *
4508  * Return: Zero on success, negative error code otherwise.
4509  */
4510 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
4511 {
4512 	return gmap_fault(vcpu->arch.gmap, gpa,
4513 			  writable ? FAULT_FLAG_WRITE : 0);
4514 }
4515 
4516 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
4517 				      unsigned long token)
4518 {
4519 	struct kvm_s390_interrupt inti;
4520 	struct kvm_s390_irq irq;
4521 
4522 	if (start_token) {
4523 		irq.u.ext.ext_params2 = token;
4524 		irq.type = KVM_S390_INT_PFAULT_INIT;
4525 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
4526 	} else {
4527 		inti.type = KVM_S390_INT_PFAULT_DONE;
4528 		inti.parm64 = token;
4529 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
4530 	}
4531 }
4532 
4533 bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
4534 				     struct kvm_async_pf *work)
4535 {
4536 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
4537 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
4538 
4539 	return true;
4540 }
4541 
4542 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
4543 				 struct kvm_async_pf *work)
4544 {
4545 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
4546 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
4547 }
4548 
4549 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
4550 			       struct kvm_async_pf *work)
4551 {
4552 	/* s390 will always inject the page directly */
4553 }
4554 
4555 bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
4556 {
4557 	/*
4558 	 * s390 will always inject the page directly,
4559 	 * but we still want check_async_completion to clean up.
4560 	 */
4561 	return true;
4562 }
4563 
4564 static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
4565 {
4566 	hva_t hva;
4567 	struct kvm_arch_async_pf arch;
4568 
4569 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4570 		return false;
4571 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
4572 	    vcpu->arch.pfault_compare)
4573 		return false;
4574 	if (psw_extint_disabled(vcpu))
4575 		return false;
4576 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
4577 		return false;
4578 	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
4579 		return false;
4580 	if (!vcpu->arch.gmap->pfault_enabled)
4581 		return false;
4582 
4583 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
4584 	hva += current->thread.gmap_addr & ~PAGE_MASK;
4585 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
4586 		return false;
4587 
4588 	return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
4589 }
4590 
4591 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
4592 {
4593 	int rc, cpuflags;
4594 
4595 	/*
4596 	 * On s390 notifications for arriving pages will be delivered directly
4597 	 * to the guest, but the housekeeping for completed pfaults is
4598 	 * handled outside the worker.
4599 	 */
4600 	kvm_check_async_pf_completion(vcpu);
4601 
4602 	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
4603 	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
4604 
4605 	if (need_resched())
4606 		schedule();
4607 
4608 	if (!kvm_is_ucontrol(vcpu->kvm)) {
4609 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
4610 		if (rc)
4611 			return rc;
4612 	}
4613 
4614 	rc = kvm_s390_handle_requests(vcpu);
4615 	if (rc)
4616 		return rc;
4617 
4618 	if (guestdbg_enabled(vcpu)) {
4619 		kvm_s390_backup_guest_per_regs(vcpu);
4620 		kvm_s390_patch_guest_per_regs(vcpu);
4621 	}
4622 
4623 	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
4624 
4625 	vcpu->arch.sie_block->icptcode = 0;
4626 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
4627 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
4628 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
4629 
4630 	return 0;
4631 }
4632 
4633 static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
4634 {
4635 	struct kvm_s390_pgm_info pgm_info = {
4636 		.code = PGM_ADDRESSING,
4637 	};
4638 	u8 opcode, ilen;
4639 	int rc;
4640 
4641 	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
4642 	trace_kvm_s390_sie_fault(vcpu);
4643 
4644 	/*
4645 	 * We want to inject an addressing exception, which is defined as a
4646 	 * suppressing or terminating exception. However, since we came here
4647 	 * by a DAT access exception, the PSW still points to the faulting
4648 	 * instruction since DAT exceptions are nullifying. So we've got
4649 	 * to look up the current opcode to get the length of the instruction
4650 	 * to be able to forward the PSW.
4651 	 */
4652 	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
4653 	ilen = insn_length(opcode);
4654 	if (rc < 0) {
4655 		return rc;
4656 	} else if (rc) {
4657 		/* Instruction-Fetching Exceptions - we can't detect the ilen.
4658 		 * Forward by arbitrary ilc, injection will take care of
4659 		 * nullification if necessary.
4660 		 */
4661 		pgm_info = vcpu->arch.pgm;
4662 		ilen = 4;
4663 	}
4664 	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
4665 	kvm_s390_forward_psw(vcpu, ilen);
4666 	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
4667 }
4668 
4669 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
4670 {
4671 	struct mcck_volatile_info *mcck_info;
4672 	struct sie_page *sie_page;
4673 
4674 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
4675 		   vcpu->arch.sie_block->icptcode);
4676 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
4677 
4678 	if (guestdbg_enabled(vcpu))
4679 		kvm_s390_restore_guest_per_regs(vcpu);
4680 
4681 	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
4682 	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
4683 
4684 	if (exit_reason == -EINTR) {
4685 		VCPU_EVENT(vcpu, 3, "%s", "machine check");
4686 		sie_page = container_of(vcpu->arch.sie_block,
4687 					struct sie_page, sie_block);
4688 		mcck_info = &sie_page->mcck_info;
4689 		kvm_s390_reinject_machine_check(vcpu, mcck_info);
4690 		return 0;
4691 	}
4692 
4693 	if (vcpu->arch.sie_block->icptcode > 0) {
4694 		int rc = kvm_handle_sie_intercept(vcpu);
4695 
4696 		if (rc != -EOPNOTSUPP)
4697 			return rc;
4698 		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
4699 		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
4700 		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
4701 		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
4702 		return -EREMOTE;
4703 	} else if (exit_reason != -EFAULT) {
4704 		vcpu->stat.exit_null++;
4705 		return 0;
4706 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
4707 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
4708 		vcpu->run->s390_ucontrol.trans_exc_code =
4709 						current->thread.gmap_addr;
4710 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
4711 		return -EREMOTE;
4712 	} else if (current->thread.gmap_pfault) {
4713 		trace_kvm_s390_major_guest_pfault(vcpu);
4714 		current->thread.gmap_pfault = 0;
4715 		if (kvm_arch_setup_async_pf(vcpu))
4716 			return 0;
4717 		vcpu->stat.pfault_sync++;
4718 		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
4719 	}
4720 	return vcpu_post_run_fault_in_sie(vcpu);
4721 }
4722 
4723 #define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
4724 static int __vcpu_run(struct kvm_vcpu *vcpu)
4725 {
4726 	int rc, exit_reason;
4727 	struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
4728 
4729 	/*
4730 	 * We try to hold kvm->srcu during most of vcpu_run (except when
4731 	 * running the guest), so that memslots (and other stuff) are protected.
4732 	 */
4733 	kvm_vcpu_srcu_read_lock(vcpu);
4734 
4735 	do {
4736 		rc = vcpu_pre_run(vcpu);
4737 		if (rc)
4738 			break;
4739 
4740 		kvm_vcpu_srcu_read_unlock(vcpu);
4741 		/*
4742 		 * As PF_VCPU will be used in the fault handler, there must be
4743 		 * no uaccess between guest_enter and guest_exit.
4744 		 */
4745 		local_irq_disable();
4746 		guest_enter_irqoff();
4747 		__disable_cpu_timer_accounting(vcpu);
4748 		local_irq_enable();
4749 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4750 			memcpy(sie_page->pv_grregs,
4751 			       vcpu->run->s.regs.gprs,
4752 			       sizeof(sie_page->pv_grregs));
4753 		}
4754 		if (test_cpu_flag(CIF_FPU))
4755 			load_fpu_regs();
4756 		exit_reason = sie64a(vcpu->arch.sie_block,
4757 				     vcpu->run->s.regs.gprs);
4758 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4759 			memcpy(vcpu->run->s.regs.gprs,
4760 			       sie_page->pv_grregs,
4761 			       sizeof(sie_page->pv_grregs));
4762 			/*
4763 			 * We're not allowed to inject interrupts on intercepts
4764 			 * that leave the guest state in an "in-between" state
4765 			 * where the next SIE entry will do a continuation.
4766 			 * Fence interrupts in our "internal" PSW.
4767 			 */
4768 			if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
4769 			    vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
4770 				vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
4771 			}
4772 		}
4773 		local_irq_disable();
4774 		__enable_cpu_timer_accounting(vcpu);
4775 		guest_exit_irqoff();
4776 		local_irq_enable();
4777 		kvm_vcpu_srcu_read_lock(vcpu);
4778 
4779 		rc = vcpu_post_run(vcpu, exit_reason);
4780 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
4781 
4782 	kvm_vcpu_srcu_read_unlock(vcpu);
4783 	return rc;
4784 }
4785 
4786 static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
4787 {
4788 	struct kvm_run *kvm_run = vcpu->run;
4789 	struct runtime_instr_cb *riccb;
4790 	struct gs_cb *gscb;
4791 
4792 	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
4793 	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
4794 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
4795 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
4796 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4797 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
4798 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
4799 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
4800 	}
4801 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
4802 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
4803 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
4804 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
4805 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4806 			kvm_clear_async_pf_completion_queue(vcpu);
4807 	}
4808 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
4809 		vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
4810 		vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
4811 		VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc);
4812 	}
4813 	/*
4814 	 * If userspace sets the riccb (e.g. after migration) to a valid state,
4815 	 * we should enable RI here instead of doing the lazy enablement.
4816 	 */
4817 	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
4818 	    test_kvm_facility(vcpu->kvm, 64) &&
4819 	    riccb->v &&
4820 	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
4821 		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
4822 		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
4823 	}
4824 	/*
4825 	 * If userspace sets the gscb (e.g. after migration) to non-zero,
4826 	 * we should enable GS here instead of doing the lazy enablement.
4827 	 */
4828 	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
4829 	    test_kvm_facility(vcpu->kvm, 133) &&
4830 	    gscb->gssm &&
4831 	    !vcpu->arch.gs_enabled) {
4832 		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
4833 		vcpu->arch.sie_block->ecb |= ECB_GS;
4834 		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
4835 		vcpu->arch.gs_enabled = 1;
4836 	}
4837 	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
4838 	    test_kvm_facility(vcpu->kvm, 82)) {
4839 		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4840 		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
4841 	}
4842 	if (MACHINE_HAS_GS) {
4843 		preempt_disable();
4844 		__ctl_set_bit(2, 4);
4845 		if (current->thread.gs_cb) {
4846 			vcpu->arch.host_gscb = current->thread.gs_cb;
4847 			save_gs_cb(vcpu->arch.host_gscb);
4848 		}
4849 		if (vcpu->arch.gs_enabled) {
4850 			current->thread.gs_cb = (struct gs_cb *)
4851 						&vcpu->run->s.regs.gscb;
4852 			restore_gs_cb(current->thread.gs_cb);
4853 		}
4854 		preempt_enable();
4855 	}
4856 	/* SIE will load etoken directly from SDNX and therefore from kvm_run */
4857 }
4858 
4859 static void sync_regs(struct kvm_vcpu *vcpu)
4860 {
4861 	struct kvm_run *kvm_run = vcpu->run;
4862 
4863 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
4864 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
4865 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
4866 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
4867 		/* some control register changes require a tlb flush */
4868 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4869 	}
4870 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4871 		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
4872 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
4873 	}
4874 	save_access_regs(vcpu->arch.host_acrs);
4875 	restore_access_regs(vcpu->run->s.regs.acrs);
4876 	/* save host (userspace) fprs/vrs */
4877 	save_fpu_regs();
4878 	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
4879 	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
4880 	if (MACHINE_HAS_VX)
4881 		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
4882 	else
4883 		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
4884 	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
4885 	if (test_fp_ctl(current->thread.fpu.fpc))
4886 		/* User space provided an invalid FPC, let's clear it */
4887 		current->thread.fpu.fpc = 0;
4888 
4889 	/* Sync fmt2 only data */
4890 	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
4891 		sync_regs_fmt2(vcpu);
4892 	} else {
4893 		/*
4894 		 * In several places we have to modify our internal view to
4895 		 * not do things that are disallowed by the ultravisor. For
4896 		 * example we must not inject interrupts after specific exits
4897 		 * (e.g. 112 prefix page not secure). We do this by turning
4898 		 * off the machine check, external and I/O interrupt bits
4899 		 * of our PSW copy. To avoid getting validity intercepts, we
4900 		 * only accept the condition code from userspace.
4901 		 */
4902 		vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
4903 		vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
4904 						   PSW_MASK_CC;
4905 	}
4906 
4907 	kvm_run->kvm_dirty_regs = 0;
4908 }
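
/*
 * sync_regs() consumes kvm_run->kvm_dirty_regs on entry; store_regs() fills
 * the same fields back in on exit.  Hypothetical userspace sketch for
 * pushing a new prefix before KVM_RUN (run and vcpu_fd are assumptions):
 *
 *	run->s.regs.prefix = new_prefix;
 *	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *
 * kvm_dirty_regs is cleared above once the values have been absorbed, so
 * the bit must be set again for every update.
 */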
4909 
4910 static void store_regs_fmt2(struct kvm_vcpu *vcpu)
4911 {
4912 	struct kvm_run *kvm_run = vcpu->run;
4913 
4914 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
4915 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
4916 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
4917 	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
4918 	kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
4919 	if (MACHINE_HAS_GS) {
4920 		preempt_disable();
4921 		__ctl_set_bit(2, 4);
4922 		if (vcpu->arch.gs_enabled)
4923 			save_gs_cb(current->thread.gs_cb);
4924 		current->thread.gs_cb = vcpu->arch.host_gscb;
4925 		restore_gs_cb(vcpu->arch.host_gscb);
4926 		if (!vcpu->arch.host_gscb)
4927 			__ctl_clear_bit(2, 4);
4928 		vcpu->arch.host_gscb = NULL;
4929 		preempt_enable();
4930 	}
4931 	/* SIE will save etoken directly into SDNX and therefore into kvm_run */
4932 }
4933 
4934 static void store_regs(struct kvm_vcpu *vcpu)
4935 {
4936 	struct kvm_run *kvm_run = vcpu->run;
4937 
4938 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
4939 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
4940 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
4941 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
4942 	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
4943 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
4944 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
4945 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
4946 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
4947 	save_access_regs(vcpu->run->s.regs.acrs);
4948 	restore_access_regs(vcpu->arch.host_acrs);
4949 	/* Save guest register state */
4950 	save_fpu_regs();
4951 	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
4952 	/* Restore will be done lazily at return */
4953 	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
4954 	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
4955 	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
4956 		store_regs_fmt2(vcpu);
4957 }
4958 
4959 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
4960 {
4961 	struct kvm_run *kvm_run = vcpu->run;
4962 	int rc;
4963 
4964 	/*
4965 	 * Running a VM while dumping always has the potential to
4966 	 * produce inconsistent dump data. But for PV vcpus a SIE
4967 	 * entry while dumping could also lead to a fatal validity
4968 	 * intercept which we absolutely want to avoid.
4969 	 */
4970 	if (vcpu->kvm->arch.pv.dumping)
4971 		return -EINVAL;
4972 
4973 	if (kvm_run->immediate_exit)
4974 		return -EINTR;
4975 
4976 	if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
4977 	    kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
4978 		return -EINVAL;
4979 
4980 	vcpu_load(vcpu);
4981 
4982 	if (guestdbg_exit_pending(vcpu)) {
4983 		kvm_s390_prepare_debug_exit(vcpu);
4984 		rc = 0;
4985 		goto out;
4986 	}
4987 
4988 	kvm_sigset_activate(vcpu);
4989 
4990 	/*
4991 	 * No need to check the return value of vcpu_start as it can only fail
4992 	 * for protvirt, and protvirt implies user-controlled cpu state.
4993 	 */
4994 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
4995 		kvm_s390_vcpu_start(vcpu);
4996 	} else if (is_vcpu_stopped(vcpu)) {
4997 		pr_err_ratelimited("can't run stopped vcpu %d\n",
4998 				   vcpu->vcpu_id);
4999 		rc = -EINVAL;
5000 		goto out;
5001 	}
5002 
5003 	sync_regs(vcpu);
5004 	enable_cpu_timer_accounting(vcpu);
5005 
5006 	might_fault();
5007 	rc = __vcpu_run(vcpu);
5008 
5009 	if (signal_pending(current) && !rc) {
5010 		kvm_run->exit_reason = KVM_EXIT_INTR;
5011 		rc = -EINTR;
5012 	}
5013 
5014 	if (guestdbg_exit_pending(vcpu) && !rc)  {
5015 		kvm_s390_prepare_debug_exit(vcpu);
5016 		rc = 0;
5017 	}
5018 
5019 	if (rc == -EREMOTE) {
5020 		/* userspace support is needed, kvm_run has been prepared */
5021 		rc = 0;
5022 	}
5023 
5024 	disable_cpu_timer_accounting(vcpu);
5025 	store_regs(vcpu);
5026 
5027 	kvm_sigset_deactivate(vcpu);
5028 
5029 	vcpu->stat.exit_userspace++;
5030 out:
5031 	vcpu_put(vcpu);
5032 	return rc;
5033 }
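
/*
 * For reference, the canonical userspace loop driving the function above
 * (minimal sketch; vcpu_fd and handle_sieic() are hypothetical, error
 * handling elided):
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0 && errno != EINTR)
 *			break;
 *		if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *			handle_sieic(run);
 *	}
 *
 * mmap_size comes from KVM_GET_VCPU_MMAP_SIZE on the /dev/kvm fd.
 */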
5034 
5035 /*
5036  * store status at address
5037  * we have two special cases:
5038  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
5039  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
5040  */
5041 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
5042 {
5043 	unsigned char archmode = 1;
5044 	freg_t fprs[NUM_FPRS];
5045 	unsigned int px;
5046 	u64 clkcomp, cputm;
5047 	int rc;
5048 
5049 	px = kvm_s390_get_prefix(vcpu);
5050 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
5051 		if (write_guest_abs(vcpu, 163, &archmode, 1))
5052 			return -EFAULT;
5053 		gpa = 0;
5054 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
5055 		if (write_guest_real(vcpu, 163, &archmode, 1))
5056 			return -EFAULT;
5057 		gpa = px;
5058 	} else {
5059 		gpa -= __LC_FPREGS_SAVE_AREA;
	}
5060 
5061 	/* manually convert vector registers if necessary */
5062 	if (MACHINE_HAS_VX) {
5063 		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
5064 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
5065 				     fprs, 128);
5066 	} else {
5067 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
5068 				     vcpu->run->s.regs.fprs, 128);
5069 	}
5070 	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
5071 			      vcpu->run->s.regs.gprs, 128);
5072 	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
5073 			      &vcpu->arch.sie_block->gpsw, 16);
5074 	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
5075 			      &px, 4);
5076 	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
5077 			      &vcpu->run->s.regs.fpc, 4);
5078 	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
5079 			      &vcpu->arch.sie_block->todpr, 4);
5080 	cputm = kvm_s390_get_cpu_timer(vcpu);
5081 	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
5082 			      &cputm, 8);
5083 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
5084 	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
5085 			      &clkcomp, 8);
5086 	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
5087 			      &vcpu->run->s.regs.acrs, 64);
5088 	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
5089 			      &vcpu->arch.sie_block->gcr, 128);
5090 	return rc ? -EFAULT : 0;
5091 }
5092 
5093 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
5094 {
5095 	/*
5096 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
5097 	 * switch in the run ioctl. Let's update our copies before we save
5098 	 * them into the save area.
5099 	 */
5100 	save_fpu_regs();
5101 	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
5102 	save_access_regs(vcpu->run->s.regs.acrs);
5103 
5104 	return kvm_s390_store_status_unloaded(vcpu, addr);
5105 }
5106 
5107 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
5108 {
5109 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
5110 	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
5111 }
5112 
5113 static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
5114 {
5115 	unsigned long i;
5116 	struct kvm_vcpu *vcpu;
5117 
5118 	kvm_for_each_vcpu(i, vcpu, kvm) {
5119 		__disable_ibs_on_vcpu(vcpu);
5120 	}
5121 }
5122 
5123 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
5124 {
5125 	if (!sclp.has_ibs)
5126 		return;
5127 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
5128 	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
5129 }
5130 
5131 int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
5132 {
5133 	int i, online_vcpus, r = 0, started_vcpus = 0;
5134 
5135 	if (!is_vcpu_stopped(vcpu))
5136 		return 0;
5137 
5138 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
5139 	/* Only one cpu at a time may enter/leave the STOPPED state. */
5140 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
5141 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
5142 
5143 	/* Let's tell the UV that we want to change into the operating state */
5144 	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5145 		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
5146 		if (r) {
5147 			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5148 			return r;
5149 		}
5150 	}
5151 
5152 	for (i = 0; i < online_vcpus; i++) {
5153 		if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i)))
5154 			started_vcpus++;
5155 	}
5156 
5157 	if (started_vcpus == 0) {
5158 		/* we're the only active VCPU -> speed it up */
5159 		__enable_ibs_on_vcpu(vcpu);
5160 	} else if (started_vcpus == 1) {
5161 		/*
5162 		 * As we are starting a second VCPU, we have to disable
5163 		 * the IBS facility on all VCPUs to remove potentially
5164 		 * outstanding ENABLE requests.
5165 		 */
5166 		__disable_ibs_on_all_vcpus(vcpu->kvm);
5167 	}
5168 
5169 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
5170 	/*
5171 	 * The real PSW might have changed due to a RESTART interpreted by the
5172 	 * ultravisor. We block all interrupts and let the next sie exit
5173 	 * refresh our view.
5174 	 */
5175 	if (kvm_s390_pv_cpu_is_protected(vcpu))
5176 		vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
5177 	/*
5178 	 * Another VCPU might have used IBS while we were offline.
5179 	 * Let's play safe and flush the VCPU at startup.
5180 	 */
5181 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
5182 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5183 	return 0;
5184 }
5185 
5186 int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
5187 {
5188 	int i, online_vcpus, r = 0, started_vcpus = 0;
5189 	struct kvm_vcpu *started_vcpu = NULL;
5190 
5191 	if (is_vcpu_stopped(vcpu))
5192 		return 0;
5193 
5194 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
5195 	/* Only one cpu at a time may enter/leave the STOPPED state. */
5196 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
5197 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
5198 
5199 	/* Let's tell the UV that we want to change into the stopped state */
5200 	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5201 		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
5202 		if (r) {
5203 			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5204 			return r;
5205 		}
5206 	}
5207 
5208 	/*
5209 	 * Set the VCPU to STOPPED and THEN clear the interrupt flag,
5210 	 * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders
5211 	 * have been fully processed. This will ensure that the VCPU
5212 	 * is kept BUSY if another VCPU is inquiring with SIGP SENSE.
5213 	 */
5214 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
5215 	kvm_s390_clear_stop_irq(vcpu);
5216 
5217 	__disable_ibs_on_vcpu(vcpu);
5218 
5219 	for (i = 0; i < online_vcpus; i++) {
5220 		struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i);
5221 
5222 		if (!is_vcpu_stopped(tmp)) {
5223 			started_vcpus++;
5224 			started_vcpu = tmp;
5225 		}
5226 	}
5227 
5228 	if (started_vcpus == 1) {
5229 		/*
5230 		 * As we only have one VCPU left, we want to enable the
5231 		 * IBS facility for that VCPU to speed it up.
5232 		 */
5233 		__enable_ibs_on_vcpu(started_vcpu);
5234 	}
5235 
5236 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5237 	return 0;
5238 }
5239 
5240 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
5241 				     struct kvm_enable_cap *cap)
5242 {
5243 	int r;
5244 
5245 	if (cap->flags)
5246 		return -EINVAL;
5247 
5248 	switch (cap->cap) {
5249 	case KVM_CAP_S390_CSS_SUPPORT:
5250 		if (!vcpu->kvm->arch.css_support) {
5251 			vcpu->kvm->arch.css_support = 1;
5252 			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
5253 			trace_kvm_s390_enable_css(vcpu->kvm);
5254 		}
5255 		r = 0;
5256 		break;
5257 	default:
5258 		r = -EINVAL;
5259 		break;
5260 	}
5261 	return r;
5262 }
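
/*
 * Illustrative sketch, not part of the original file (vcpu_fd: an open
 * vcpu file descriptor): the only per-vcpu capability handled above is
 * enabled from userspace like this:
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_CSS_SUPPORT };
 *
 *	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		err(1, "KVM_ENABLE_CAP");
 */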
5263 
5264 static long kvm_s390_vcpu_sida_op(struct kvm_vcpu *vcpu,
5265 				  struct kvm_s390_mem_op *mop)
5266 {
5267 	void __user *uaddr = (void __user *)mop->buf;
5268 	void *sida_addr;
5269 	int r = 0;
5270 
5271 	if (mop->flags || !mop->size)
5272 		return -EINVAL;
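	/* reject a size + sida_offset combination that wraps around */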
5273 	if (mop->size + mop->sida_offset < mop->size)
5274 		return -EINVAL;
5275 	if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
5276 		return -E2BIG;
5277 	if (!kvm_s390_pv_cpu_is_protected(vcpu))
5278 		return -EINVAL;
5279 
5280 	sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset;
5281 
5282 	switch (mop->op) {
5283 	case KVM_S390_MEMOP_SIDA_READ:
5284 		if (copy_to_user(uaddr, sida_addr, mop->size))
5285 			r = -EFAULT;
5286 
5287 		break;
5288 	case KVM_S390_MEMOP_SIDA_WRITE:
5289 		if (copy_from_user(sida_addr, uaddr, mop->size))
5290 			r = -EFAULT;
5291 		break;
5292 	}
5293 	return r;
5294 }
5295 
5296 static long kvm_s390_vcpu_mem_op(struct kvm_vcpu *vcpu,
5297 				 struct kvm_s390_mem_op *mop)
5298 {
5299 	void __user *uaddr = (void __user *)mop->buf;
5300 	enum gacc_mode acc_mode;
5301 	void *tmpbuf = NULL;
5302 	int r;
5303 
5304 	r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_INJECT_EXCEPTION |
5305 					KVM_S390_MEMOP_F_CHECK_ONLY |
5306 					KVM_S390_MEMOP_F_SKEY_PROTECTION);
5307 	if (r)
5308 		return r;
5309 	if (mop->ar >= NUM_ACRS)
5310 		return -EINVAL;
5311 	if (kvm_s390_pv_cpu_is_protected(vcpu))
5312 		return -EINVAL;
5313 	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
5314 		tmpbuf = vmalloc(mop->size);
5315 		if (!tmpbuf)
5316 			return -ENOMEM;
5317 	}
5318 
5319 	acc_mode = mop->op == KVM_S390_MEMOP_LOGICAL_READ ? GACC_FETCH : GACC_STORE;
5320 	if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
5321 		r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size,
5322 				    acc_mode, mop->key);
5323 		goto out_inject;
5324 	}
5325 	if (acc_mode == GACC_FETCH) {
5326 		r = read_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
5327 					mop->size, mop->key);
5328 		if (r)
5329 			goto out_inject;
5330 		if (copy_to_user(uaddr, tmpbuf, mop->size)) {
5331 			r = -EFAULT;
5332 			goto out_free;
5333 		}
5334 	} else {
5335 		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
5336 			r = -EFAULT;
5337 			goto out_free;
5338 		}
5339 		r = write_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
5340 					 mop->size, mop->key);
5341 	}
5342 
5343 out_inject:
5344 	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
5345 		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
5346 
5347 out_free:
5348 	vfree(tmpbuf);
5349 	return r;
5350 }
5351 
5352 static long kvm_s390_vcpu_memsida_op(struct kvm_vcpu *vcpu,
5353 				     struct kvm_s390_mem_op *mop)
5354 {
5355 	int r, srcu_idx;
5356 
5357 	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
5358 
5359 	switch (mop->op) {
5360 	case KVM_S390_MEMOP_LOGICAL_READ:
5361 	case KVM_S390_MEMOP_LOGICAL_WRITE:
5362 		r = kvm_s390_vcpu_mem_op(vcpu, mop);
5363 		break;
5364 	case KVM_S390_MEMOP_SIDA_READ:
5365 	case KVM_S390_MEMOP_SIDA_WRITE:
5366 		/* the vcpu->mutex protects the SIDA from going away under us */
5367 		r = kvm_s390_vcpu_sida_op(vcpu, mop);
5368 		break;
5369 	default:
5370 		r = -EINVAL;
5371 	}
5372 
5373 	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
5374 	return r;
5375 }
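
/*
 * Illustrative sketch, not part of the original file (vcpu_fd: an open
 * vcpu file descriptor): a logical read of guest memory through the
 * KVM_S390_MEM_OP vcpu ioctl could look like:
 *
 *	char buf[256];
 *	struct kvm_s390_mem_op mop = {
 *		.gaddr = guest_addr,
 *		.size  = sizeof(buf),
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)(uintptr_t)buf,
 *		.ar    = 0,	/* access register 0 */
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &mop) < 0)
 *		err(1, "KVM_S390_MEM_OP");
 *
 * Setting KVM_S390_MEMOP_F_CHECK_ONLY in .flags checks accessibility
 * without copying any data, as handled in kvm_s390_vcpu_mem_op() above.
 */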
5376 
5377 long kvm_arch_vcpu_async_ioctl(struct file *filp,
5378 			       unsigned int ioctl, unsigned long arg)
5379 {
5380 	struct kvm_vcpu *vcpu = filp->private_data;
5381 	void __user *argp = (void __user *)arg;
5382 
5383 	switch (ioctl) {
5384 	case KVM_S390_IRQ: {
5385 		struct kvm_s390_irq s390irq;
5386 
5387 		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
5388 			return -EFAULT;
5389 		return kvm_s390_inject_vcpu(vcpu, &s390irq);
5390 	}
5391 	case KVM_S390_INTERRUPT: {
5392 		struct kvm_s390_interrupt s390int;
5393 		struct kvm_s390_irq s390irq = {};
5394 
5395 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
5396 			return -EFAULT;
5397 		if (s390int_to_s390irq(&s390int, &s390irq))
5398 			return -EINVAL;
5399 		return kvm_s390_inject_vcpu(vcpu, &s390irq);
5400 	}
5401 	}
5402 	return -ENOIOCTLCMD;
5403 }
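
/*
 * KVM_S390_IRQ and KVM_S390_INTERRUPT are deliberately handled on the
 * async ioctl path, i.e. without taking the vcpu mutex, so userspace
 * can inject interrupts while the VCPU thread is inside KVM_RUN.
 */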
5404 
5405 static int kvm_s390_handle_pv_vcpu_dump(struct kvm_vcpu *vcpu,
5406 					struct kvm_pv_cmd *cmd)
5407 {
5408 	struct kvm_s390_pv_dmp dmp;
5409 	void *data;
5410 	int ret;
5411 
5412 	/* Dump initialization is a prerequisite */
5413 	if (!vcpu->kvm->arch.pv.dumping)
5414 		return -EINVAL;
5415 
5416 	if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp)))
5417 		return -EFAULT;
5418 
5419 	/* We only handle this subcmd right now */
5420 	if (dmp.subcmd != KVM_PV_DUMP_CPU)
5421 		return -EINVAL;
5422 
5423 	/* The CPU dump length equals the storage donated at CPU creation. */
5424 	if (dmp.buff_len != uv_info.guest_cpu_stor_len)
5425 		return -EINVAL;
5426 
5427 	data = kvzalloc(uv_info.guest_cpu_stor_len, GFP_KERNEL);
5428 	if (!data)
5429 		return -ENOMEM;
5430 
5431 	ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc);
5432 
5433 	VCPU_EVENT(vcpu, 3, "PROTVIRT DUMP CPU %d rc %x rrc %x",
5434 		   vcpu->vcpu_id, cmd->rc, cmd->rrc);
5435 
5436 	if (ret)
5437 		ret = -EINVAL;
5438 
5439 	/* On success copy over the dump data */
5440 	if (!ret && copy_to_user((__u8 __user *)dmp.buff_addr, data, uv_info.guest_cpu_stor_len))
5441 		ret = -EFAULT;
5442 
5443 	kvfree(data);
5444 	return ret;
5445 }
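
/*
 * Illustrative sketch, not part of the original file (vcpu_fd: an open
 * vcpu file descriptor): userspace drives the dump above via
 * KVM_S390_PV_CPU_COMMAND with cmd KVM_PV_DUMP and subcmd
 * KVM_PV_DUMP_CPU, roughly:
 *
 *	struct kvm_s390_pv_dmp dmp = {
 *		.subcmd    = KVM_PV_DUMP_CPU,
 *		.buff_addr = (__u64)(uintptr_t)buf,
 *		.buff_len  = cpu_stor_len,	/* must equal the UV's guest CPU storage length */
 *	};
 *	struct kvm_pv_cmd cmd = {
 *		.cmd  = KVM_PV_DUMP,
 *		.data = (__u64)(uintptr_t)&dmp,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_S390_PV_CPU_COMMAND, &cmd);
 */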
5446 
5447 long kvm_arch_vcpu_ioctl(struct file *filp,
5448 			 unsigned int ioctl, unsigned long arg)
5449 {
5450 	struct kvm_vcpu *vcpu = filp->private_data;
5451 	void __user *argp = (void __user *)arg;
5452 	int idx;
5453 	long r;
5454 	u16 rc, rrc;
5455 
5456 	vcpu_load(vcpu);
5457 
5458 	switch (ioctl) {
5459 	case KVM_S390_STORE_STATUS:
5460 		idx = srcu_read_lock(&vcpu->kvm->srcu);
5461 		r = kvm_s390_store_status_unloaded(vcpu, arg);
5462 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
5463 		break;
5464 	case KVM_S390_SET_INITIAL_PSW: {
5465 		psw_t psw;
5466 
5467 		r = -EFAULT;
5468 		if (copy_from_user(&psw, argp, sizeof(psw)))
5469 			break;
5470 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
5471 		break;
5472 	}
5473 	case KVM_S390_CLEAR_RESET:
5474 		r = 0;
5475 		kvm_arch_vcpu_ioctl_clear_reset(vcpu);
5476 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5477 			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
5478 					  UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
5479 			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
5480 				   rc, rrc);
5481 		}
5482 		break;
5483 	case KVM_S390_INITIAL_RESET:
5484 		r = 0;
5485 		kvm_arch_vcpu_ioctl_initial_reset(vcpu);
5486 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5487 			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
5488 					  UVC_CMD_CPU_RESET_INITIAL,
5489 					  &rc, &rrc);
5490 			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
5491 				   rc, rrc);
5492 		}
5493 		break;
5494 	case KVM_S390_NORMAL_RESET:
5495 		r = 0;
5496 		kvm_arch_vcpu_ioctl_normal_reset(vcpu);
5497 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5498 			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
5499 					  UVC_CMD_CPU_RESET, &rc, &rrc);
5500 			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
5501 				   rc, rrc);
5502 		}
5503 		break;
5504 	case KVM_SET_ONE_REG:
5505 	case KVM_GET_ONE_REG: {
5506 		struct kvm_one_reg reg;
5507 		r = -EINVAL;
5508 		if (kvm_s390_pv_cpu_is_protected(vcpu))
5509 			break;
5510 		r = -EFAULT;
5511 		if (copy_from_user(&reg, argp, sizeof(reg)))
5512 			break;
5513 		if (ioctl == KVM_SET_ONE_REG)
5514 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
5515 		else
5516 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
5517 		break;
5518 	}
5519 #ifdef CONFIG_KVM_S390_UCONTROL
5520 	case KVM_S390_UCAS_MAP: {
5521 		struct kvm_s390_ucas_mapping ucasmap;
5522 
5523 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
5524 			r = -EFAULT;
5525 			break;
5526 		}
5527 
5528 		if (!kvm_is_ucontrol(vcpu->kvm)) {
5529 			r = -EINVAL;
5530 			break;
5531 		}
5532 
5533 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
5534 				     ucasmap.vcpu_addr, ucasmap.length);
5535 		break;
5536 	}
5537 	case KVM_S390_UCAS_UNMAP: {
5538 		struct kvm_s390_ucas_mapping ucasmap;
5539 
5540 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
5541 			r = -EFAULT;
5542 			break;
5543 		}
5544 
5545 		if (!kvm_is_ucontrol(vcpu->kvm)) {
5546 			r = -EINVAL;
5547 			break;
5548 		}
5549 
5550 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
5551 			ucasmap.length);
5552 		break;
5553 	}
5554 #endif
5555 	case KVM_S390_VCPU_FAULT: {
5556 		r = gmap_fault(vcpu->arch.gmap, arg, 0);
5557 		break;
5558 	}
5559 	case KVM_ENABLE_CAP:
5560 	{
5561 		struct kvm_enable_cap cap;
5562 		r = -EFAULT;
5563 		if (copy_from_user(&cap, argp, sizeof(cap)))
5564 			break;
5565 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
5566 		break;
5567 	}
5568 	case KVM_S390_MEM_OP: {
5569 		struct kvm_s390_mem_op mem_op;
5570 
5571 		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
5572 			r = kvm_s390_vcpu_memsida_op(vcpu, &mem_op);
5573 		else
5574 			r = -EFAULT;
5575 		break;
5576 	}
5577 	case KVM_S390_SET_IRQ_STATE: {
5578 		struct kvm_s390_irq_state irq_state;
5579 
5580 		r = -EFAULT;
5581 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
5582 			break;
5583 		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
5584 		    irq_state.len == 0 ||
5585 		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
5586 			r = -EINVAL;
5587 			break;
5588 		}
5589 		/* do not use irq_state.flags, as it would break old QEMUs */
5590 		r = kvm_s390_set_irq_state(vcpu,
5591 					   (void __user *) irq_state.buf,
5592 					   irq_state.len);
5593 		break;
5594 	}
5595 	case KVM_S390_GET_IRQ_STATE: {
5596 		struct kvm_s390_irq_state irq_state;
5597 
5598 		r = -EFAULT;
5599 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
5600 			break;
5601 		if (irq_state.len == 0) {
5602 			r = -EINVAL;
5603 			break;
5604 		}
5605 		/* do not use irq_state.flags, as it would break old QEMUs */
5606 		r = kvm_s390_get_irq_state(vcpu,
5607 					   (__u8 __user *)  irq_state.buf,
5608 					   irq_state.len);
5609 		break;
5610 	}
5611 	case KVM_S390_PV_CPU_COMMAND: {
5612 		struct kvm_pv_cmd cmd;
5613 
5614 		r = -EINVAL;
5615 		if (!is_prot_virt_host())
5616 			break;
5617 
5618 		r = -EFAULT;
5619 		if (copy_from_user(&cmd, argp, sizeof(cmd)))
5620 			break;
5621 
5622 		r = -EINVAL;
5623 		if (cmd.flags)
5624 			break;
5625 
5626 		/* We only handle this cmd right now */
5627 		if (cmd.cmd != KVM_PV_DUMP)
5628 			break;
5629 
5630 		r = kvm_s390_handle_pv_vcpu_dump(vcpu, &cmd);
5631 
5632 		/* Always copy over UV rc / rrc data */
5633 		if (copy_to_user((__u8 __user *)argp, &cmd.rc,
5634 				 sizeof(cmd.rc) + sizeof(cmd.rrc)))
5635 			r = -EFAULT;
5636 		break;
5637 	}
5638 	default:
5639 		r = -ENOTTY;
5640 	}
5641 
5642 	vcpu_put(vcpu);
5643 	return r;
5644 }
5645 
5646 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
5647 {
5648 #ifdef CONFIG_KVM_S390_UCONTROL
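	/* user-controlled VMs may mmap() the SIE control block at this offset */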
5649 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
5650 		 && (kvm_is_ucontrol(vcpu->kvm))) {
5651 		vmf->page = virt_to_page(vcpu->arch.sie_block);
5652 		get_page(vmf->page);
5653 		return 0;
5654 	}
5655 #endif
5656 	return VM_FAULT_SIGBUS;
5657 }
5658 
5659 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
5660 {
5661 	return true;
5662 }
5663 
5664 /* Section: memory related */
5665 int kvm_arch_prepare_memory_region(struct kvm *kvm,
5666 				   const struct kvm_memory_slot *old,
5667 				   struct kvm_memory_slot *new,
5668 				   enum kvm_mr_change change)
5669 {
5670 	gpa_t size;
5671 
5672 	/* When we are protected, we should not change the memory slots */
5673 	if (kvm_s390_pv_get_handle(kvm))
5674 		return -EINVAL;
5675 
5676 	if (change != KVM_MR_DELETE && change != KVM_MR_FLAGS_ONLY) {
5677 		/*
5678 		 * A few sanity checks. Memory slots have to start and end at a
5679 		 * segment boundary (1 MB). The memory in userland may be fragmented
5680 		 * into various different VMAs, and it is fine to mmap() and munmap()
5681 		 * within this slot at any time after this call.
5682 		 */
5683 
5684 		if (new->userspace_addr & 0xffffful)
5685 			return -EINVAL;
5686 
5687 		size = new->npages * PAGE_SIZE;
5688 		if (size & 0xffffful)
5689 			return -EINVAL;
5690 
5691 		if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
5692 			return -EINVAL;
5693 	}
5694 
5695 	if (!kvm->arch.migration_mode)
5696 		return 0;
5697 
5698 	/*
5699 	 * Turn off migration mode when:
5700 	 * - userspace creates a new memslot with dirty logging off,
5701 	 * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and
5702 	 *   dirty logging is turned off.
5703 	 * Migration mode expects dirty page logging being enabled to store
5704 	 * its dirty bitmap.
5705 	 */
5706 	if (change != KVM_MR_DELETE &&
5707 	    !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
5708 		WARN(kvm_s390_vm_stop_migration(kvm),
5709 		     "Failed to stop migration mode");
5710 
5711 	return 0;
5712 }
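
/*
 * Worked example for the checks above: a 256 MB slot whose
 * userspace_addr is a 1 MB multiple passes, since both the address and
 * npages * PAGE_SIZE then have the low 20 bits clear; a slot starting
 * at an odd half-megabyte offset is rejected with -EINVAL.
 */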
5713 
5714 void kvm_arch_commit_memory_region(struct kvm *kvm,
5715 				struct kvm_memory_slot *old,
5716 				const struct kvm_memory_slot *new,
5717 				enum kvm_mr_change change)
5718 {
5719 	int rc = 0;
5720 
5721 	switch (change) {
5722 	case KVM_MR_DELETE:
5723 		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
5724 					old->npages * PAGE_SIZE);
5725 		break;
5726 	case KVM_MR_MOVE:
5727 		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
5728 					old->npages * PAGE_SIZE);
5729 		if (rc)
5730 			break;
5731 		fallthrough;
5732 	case KVM_MR_CREATE:
5733 		rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr,
5734 				      new->base_gfn * PAGE_SIZE,
5735 				      new->npages * PAGE_SIZE);
5736 		break;
5737 	case KVM_MR_FLAGS_ONLY:
5738 		break;
5739 	default:
5740 		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
5741 	}
5742 	if (rc)
5743 		pr_warn("failed to commit memory region\n");
5744 	return;
5745 }
5746 
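/*
 * Build the mask of facility bits in facility doubleword i that may be
 * passed through to guests: the 2-bit field of sclp.hmfai belonging to
 * word i selects how many additional 16-bit chunks, beyond the topmost
 * chunk that is always cleared, are masked off as hypervisor-managed.
 */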
5747 static inline unsigned long nonhyp_mask(int i)
5748 {
5749 	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
5750 
5751 	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
5752 }
5753 
5754 static int __init kvm_s390_init(void)
5755 {
5756 	int i, r;
5757 
5758 	if (!sclp.has_sief2) {
5759 		pr_info("SIE is not available\n");
5760 		return -ENODEV;
5761 	}
5762 
5763 	if (nested && hpage) {
5764 		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
5765 		return -EINVAL;
5766 	}
5767 
5768 	for (i = 0; i < 16; i++)
5769 		kvm_s390_fac_base[i] |=
5770 			stfle_fac_list[i] & nonhyp_mask(i);
5771 
5772 	r = __kvm_s390_init();
5773 	if (r)
5774 		return r;
5775 
5776 	r = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
5777 	if (r) {
5778 		__kvm_s390_exit();
5779 		return r;
5780 	}
5781 	return 0;
5782 }
5783 
5784 static void __exit kvm_s390_exit(void)
5785 {
5786 	kvm_exit();
5787 
5788 	__kvm_s390_exit();
5789 }
5790 
5791 module_init(kvm_s390_init);
5792 module_exit(kvm_s390_exit);
5793 
5794 /*
5795  * Enable autoloading of the kvm module.
5796  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
5797  * since x86 takes a different approach.
5798  */
5799 #include <linux/miscdevice.h>
5800 MODULE_ALIAS_MISCDEV(KVM_MINOR);
5801 MODULE_ALIAS("devname:kvm");
5802