// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/pgtable.h>
#include <linux/mmu_notifier.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include <asm/uv.h>
#include <asm/fpu/api.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "pci.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, inject_io),
	STATS_DESC_COUNTER(VM, inject_float_mchk),
	STATS_DESC_COUNTER(VM, inject_pfault_done),
	STATS_DESC_COUNTER(VM, inject_service_signal),
	STATS_DESC_COUNTER(VM, inject_virtio),
	STATS_DESC_COUNTER(VM, aen_forward),
	STATS_DESC_COUNTER(VM, gmap_shadow_reuse),
	STATS_DESC_COUNTER(VM, gmap_shadow_create),
	STATS_DESC_COUNTER(VM, gmap_shadow_r1_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_r2_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_r3_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_sg_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_pg_entry),
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};
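
/*
 * Note on the layout computed above: the binary stats file format is
 * header, id string, descriptor array, data values - in that order -
 * which is why each offset is the sum of the sizes of everything that
 * precedes it.
 */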

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, exit_userspace),
	STATS_DESC_COUNTER(VCPU, exit_null),
	STATS_DESC_COUNTER(VCPU, exit_external_request),
	STATS_DESC_COUNTER(VCPU, exit_io_request),
	STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
	STATS_DESC_COUNTER(VCPU, exit_stop_request),
	STATS_DESC_COUNTER(VCPU, exit_validity),
	STATS_DESC_COUNTER(VCPU, exit_instruction),
	STATS_DESC_COUNTER(VCPU, exit_pei),
	STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
	STATS_DESC_COUNTER(VCPU, instruction_lctl),
	STATS_DESC_COUNTER(VCPU, instruction_lctlg),
	STATS_DESC_COUNTER(VCPU, instruction_stctl),
	STATS_DESC_COUNTER(VCPU, instruction_stctg),
	STATS_DESC_COUNTER(VCPU, exit_program_interruption),
	STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
	STATS_DESC_COUNTER(VCPU, exit_operation_exception),
	STATS_DESC_COUNTER(VCPU, deliver_ckc),
	STATS_DESC_COUNTER(VCPU, deliver_cputm),
	STATS_DESC_COUNTER(VCPU, deliver_external_call),
	STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
	STATS_DESC_COUNTER(VCPU, deliver_service_signal),
	STATS_DESC_COUNTER(VCPU, deliver_virtio),
	STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
	STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
	STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
	STATS_DESC_COUNTER(VCPU, deliver_program),
	STATS_DESC_COUNTER(VCPU, deliver_io),
	STATS_DESC_COUNTER(VCPU, deliver_machine_check),
	STATS_DESC_COUNTER(VCPU, exit_wait_state),
	STATS_DESC_COUNTER(VCPU, inject_ckc),
	STATS_DESC_COUNTER(VCPU, inject_cputm),
	STATS_DESC_COUNTER(VCPU, inject_external_call),
	STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
	STATS_DESC_COUNTER(VCPU, inject_mchk),
	STATS_DESC_COUNTER(VCPU, inject_pfault_init),
	STATS_DESC_COUNTER(VCPU, inject_program),
	STATS_DESC_COUNTER(VCPU, inject_restart),
	STATS_DESC_COUNTER(VCPU, inject_set_prefix),
	STATS_DESC_COUNTER(VCPU, inject_stop_signal),
	STATS_DESC_COUNTER(VCPU, instruction_epsw),
	STATS_DESC_COUNTER(VCPU, instruction_gs),
	STATS_DESC_COUNTER(VCPU, instruction_io_other),
	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
	STATS_DESC_COUNTER(VCPU, instruction_lpswey),
	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
	STATS_DESC_COUNTER(VCPU, instruction_ptff),
	STATS_DESC_COUNTER(VCPU, instruction_sck),
	STATS_DESC_COUNTER(VCPU, instruction_sckpf),
	STATS_DESC_COUNTER(VCPU, instruction_stidp),
	STATS_DESC_COUNTER(VCPU, instruction_spx),
	STATS_DESC_COUNTER(VCPU, instruction_stpx),
	STATS_DESC_COUNTER(VCPU, instruction_stap),
	STATS_DESC_COUNTER(VCPU, instruction_iske),
	STATS_DESC_COUNTER(VCPU, instruction_ri),
	STATS_DESC_COUNTER(VCPU, instruction_rrbe),
	STATS_DESC_COUNTER(VCPU, instruction_sske),
	STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
	STATS_DESC_COUNTER(VCPU, instruction_stsi),
	STATS_DESC_COUNTER(VCPU, instruction_stfl),
	STATS_DESC_COUNTER(VCPU, instruction_tb),
	STATS_DESC_COUNTER(VCPU, instruction_tpi),
	STATS_DESC_COUNTER(VCPU, instruction_tprot),
	STATS_DESC_COUNTER(VCPU, instruction_tsch),
	STATS_DESC_COUNTER(VCPU, instruction_sie),
	STATS_DESC_COUNTER(VCPU, instruction_essa),
	STATS_DESC_COUNTER(VCPU, instruction_sthyi),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
	STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
	STATS_DESC_COUNTER(VCPU, diag_9c_forward),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
	STATS_DESC_COUNTER(VCPU, pfault_sync)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling.  >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");

/* maximum diag9c forwarding per second */
unsigned int diag9c_forwarding_hz;
module_param(diag9c_forwarding_hz, uint, 0644);
MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");

/*
 * allow asynchronous deinit for protected guests; enable by default since
 * the feature is opt-in anyway
 */
static int async_destroy = 1;
module_param(async_destroy, int, 0444);
MODULE_PARM_DESC(async_destroy, "Asynchronous destroy for protected guests");

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		sizeof(stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;

/* Section: not file related */
/* forward declarations */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);
static int sca_switch_to_extended(struct kvm *kvm);

static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}
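
/*
 * Informational example: with the multiple-epoch facility (ECD_MEF),
 * {epdx:epoch} acts as one widened signed value. For epoch = 5 and
 * delta = -2 the unsigned 64-bit addition wraps (result 3 with a carry
 * out), so the "epoch < delta" test adds back the carry that the -1 in
 * delta_idx (sign extension) pre-subtracted; the net change to epdx is 0,
 * as expected for a small negative delta.
 */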

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

static inline int plo_test_bit(unsigned char nr)
{
	unsigned long function = (unsigned long)nr | 0x100;
	int cc;

	asm volatile(
		"	lgr	0,%[function]\n"
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: [function] "d" (function)
		: "cc", "0");
	return cc == 0;
}
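
/*
 * Setting bit 0x100 in the function code turns PERFORM LOCKED OPERATION
 * into a "test bit" query: condition code 0 indicates that the function
 * selected by the low byte is installed, which is why no real parameter
 * registers need to be set up above.
 */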

static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
	asm volatile(
		"	lghi	0,0\n"
		"	lgr	1,%[query]\n"
		/* Parameter registers are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		:
		: [query] "d" ((unsigned long)query), [opc] "i" (opcode)
		: "cc", "memory", "0", "1");
}
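
/*
 * Both users of this helper (SORTL and DFLTCC below) follow the same
 * query convention: function code 0 in GR0 selects the query function,
 * and GR1 addresses the parameter block that receives the indication
 * bits for the installed subfunctions.
 */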

#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939

static void __init kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

static int __init __kvm_s390_init(void)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf_uv)
		goto err_kvm_uv;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
		goto err_debug_view;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto err_flic;
	}

	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
		rc = kvm_s390_pci_init();
		if (rc) {
			pr_err("Unable to allocate AIFT for PCI\n");
			goto err_pci;
		}
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto err_gib;

	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);

	return 0;

err_gib:
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_exit();
err_pci:
err_flic:
err_debug_view:
	debug_unregister(kvm_s390_dbf_uv);
err_kvm_uv:
	debug_unregister(kvm_s390_dbf);
	return rc;
}

static void __kvm_s390_exit(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);

	kvm_s390_gib_destroy();
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_exit();
	debug_unregister(kvm_s390_dbf);
	debug_unregister(kvm_s390_dbf_uv);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
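
/*
 * Minimal userspace sketch (an assumption about typical usage, not taken
 * from this file):
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *
 *	if (ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0))
 *		err(1, "KVM_S390_ENABLE_SIE");
 *
 * The ioctl is issued on the /dev/kvm device fd itself and switches the
 * calling process' address space into a SIE-compatible layout before any
 * VM is created.
 */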

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_S390_DIAG318:
	case KVM_CAP_IRQFD_RESAMPLE:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		r = KVM_GUESTDBG_VALID_MASK;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_S390_MEM_OP_EXTENSION:
		/*
		 * Flag bits indicating which extensions are supported.
		 * If r > 0, the base extension must also be supported/indicated,
		 * in order to maintain backwards compatibility.
		 */
		r = KVM_S390_MEMOP_EXTENSION_CAP_BASE |
		    KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		if (ext == KVM_CAP_NR_VCPUS)
			r = min_t(unsigned int, num_online_cpus(), r);
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	case KVM_CAP_S390_PROTECTED_ASYNC_DISABLE:
		r = async_destroy && is_prot_virt_host();
		break;
	case KVM_CAP_S390_PROTECTED:
		r = is_prot_virt_host();
		break;
	case KVM_CAP_S390_PROTECTED_DUMP: {
		u64 pv_cmds_dump[] = {
			BIT_UVC_CMD_DUMP_INIT,
			BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE,
			BIT_UVC_CMD_DUMP_CPU,
			BIT_UVC_CMD_DUMP_COMPLETE,
		};
		int i;

		r = is_prot_virt_host();

		for (i = 0; i < ARRAY_SIZE(pv_cmds_dump); i++) {
			if (!test_bit_inv(pv_cmds_dump[i],
					  (unsigned long *)&uv_info.inst_calls_list)) {
				r = 0;
				break;
			}
		}
		break;
	}
	case KVM_CAP_S390_ZPCI_OP:
		r = kvm_s390_pci_interp_allowed();
		break;
	case KVM_CAP_S390_CPU_TOPOLOGY:
		r = test_facility(11);
		break;
	default:
		r = 0;
	}
	return r;
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}
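
/*
 * Each iteration of the loop above covers one segment (_PAGE_ENTRIES
 * 4K pages, i.e. 1 MB of guest memory), which bounds the size of the
 * on-stack bitmap and gives cond_resched() a reasonably fine-grained
 * scheduling point.
 */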

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			if (test_facility(192)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 192);
				set_kvm_facility(kvm->arch.model.fac_list, 192);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			mmap_write_lock(kvm->mm);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			mmap_write_unlock(kvm->mm);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid that the hardware works on
			 * stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	case KVM_CAP_S390_CPU_TOPOLOGY:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(11)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 11);
			set_kvm_facility(kvm->arch.model.fac_list, 11);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_CPU_TOPOLOGY %s",
			 r ? "(not available)" : "(success)");
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_vcpu_pci_setup(struct kvm_vcpu *vcpu)
{
	/* Only set the ECB bits after guest requests zPCI interpretation */
	if (!vcpu->kvm->arch.use_zpci_interp)
		return;

	vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI;
	vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI;
}

void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	lockdep_assert_held(&kvm->lock);

	if (!kvm_s390_pci_interp_allowed())
		return;

	/*
	 * If host is configured for PCI and the necessary facilities are
	 * available, turn on interpretation for the life of this guest
	 */
	kvm->arch.use_zpci_interp = 1;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_pci_setup(vcpu);
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	unsigned long cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int bkt;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || kvm_memslots_empty(slots))
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	kvm_for_each_memslot(ms, bkt, slots) {
		if (!ms->dirty_bitmap)
			return -EINVAL;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	__kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
					   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	__kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	mutex_lock(&kvm->lock);
	/*
	 * For protected guests, the TOD is managed by the ultravisor, so trying
	 * to change it will never bring the expected results.
	 */
	if (kvm_s390_pv_is_protected(kvm)) {
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

out_unlock:
	mutex_unlock(&kvm->lock);
	return ret;
}

static void kvm_s390_get_tod_clock(struct kvm *kvm,
				   struct kvm_s390_vm_tod_clock *gtod)
{
	union tod_clock clk;

	preempt_disable();

	store_tod_clock_ext(&clk);

	gtod->tod = clk.tod + kvm->arch.epoch;
	gtod->epoch_idx = 0;
	if (test_kvm_facility(kvm, 139)) {
		gtod->epoch_idx = clk.ei + kvm->arch.epdx;
		if (gtod->tod < clk.tod)
			gtod->epoch_idx += 1;
	}

	preempt_enable();
}
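
/*
 * In other words: the guest view is guest_tod = host_tod + epoch as a
 * widened sum - if adding the (possibly negative) epoch wrapped the
 * 64-bit TOD value, the carry is propagated into the epoch index.
 */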

static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));
	kvm_s390_get_tod_clock(kvm, &gtod);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
					 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
			 data.feat[0],
			 data.feat[1],
			 data.feat[2]);
	return 0;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
		mutex_unlock(&kvm->lock);
		return -EFAULT;
	}
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "SET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "SET: guest KM     subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "SET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "SET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "SET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "SET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "SET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

#define KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK	\
(						\
	((struct kvm_s390_vm_cpu_uv_feat){	\
		.ap = 1,			\
		.ap_intr = 1,			\
	})					\
	.feat					\
)
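
/*
 * The compound literal above yields a mask with only the ap and ap_intr
 * bits set, so UV features that are not explicitly listed here stay
 * hidden from the guest.
 */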
1550 
kvm_s390_set_uv_feat(struct kvm * kvm,struct kvm_device_attr * attr)1551 static int kvm_s390_set_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
1552 {
1553 	struct kvm_s390_vm_cpu_uv_feat __user *ptr = (void __user *)attr->addr;
1554 	unsigned long data, filter;
1555 
1556 	filter = uv_info.uv_feature_indications & KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK;
1557 	if (get_user(data, &ptr->feat))
1558 		return -EFAULT;
1559 	if (!bitmap_subset(&data, &filter, KVM_S390_VM_CPU_UV_FEAT_NR_BITS))
1560 		return -EINVAL;
1561 
1562 	mutex_lock(&kvm->lock);
1563 	if (kvm->created_vcpus) {
1564 		mutex_unlock(&kvm->lock);
1565 		return -EBUSY;
1566 	}
1567 	kvm->arch.model.uv_feat_guest.feat = data;
1568 	mutex_unlock(&kvm->lock);
1569 
1570 	VM_EVENT(kvm, 3, "SET: guest UV-feat: 0x%16.16lx", data);
1571 
1572 	return 0;
1573 }
1574 
kvm_s390_set_cpu_model(struct kvm * kvm,struct kvm_device_attr * attr)1575 static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1576 {
1577 	int ret = -ENXIO;
1578 
1579 	switch (attr->attr) {
1580 	case KVM_S390_VM_CPU_PROCESSOR:
1581 		ret = kvm_s390_set_processor(kvm, attr);
1582 		break;
1583 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1584 		ret = kvm_s390_set_processor_feat(kvm, attr);
1585 		break;
1586 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1587 		ret = kvm_s390_set_processor_subfunc(kvm, attr);
1588 		break;
1589 	case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
1590 		ret = kvm_s390_set_uv_feat(kvm, attr);
1591 		break;
1592 	}
1593 	return ret;
1594 }
1595 
kvm_s390_get_processor(struct kvm * kvm,struct kvm_device_attr * attr)1596 static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1597 {
1598 	struct kvm_s390_vm_cpu_processor *proc;
1599 	int ret = 0;
1600 
1601 	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
1602 	if (!proc) {
1603 		ret = -ENOMEM;
1604 		goto out;
1605 	}
1606 	proc->cpuid = kvm->arch.model.cpuid;
1607 	proc->ibc = kvm->arch.model.ibc;
1608 	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1609 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
1610 	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1611 		 kvm->arch.model.ibc,
1612 		 kvm->arch.model.cpuid);
1613 	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1614 		 kvm->arch.model.fac_list[0],
1615 		 kvm->arch.model.fac_list[1],
1616 		 kvm->arch.model.fac_list[2]);
1617 	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1618 		ret = -EFAULT;
1619 	kfree(proc);
1620 out:
1621 	return ret;
1622 }
1623 
kvm_s390_get_machine(struct kvm * kvm,struct kvm_device_attr * attr)1624 static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1625 {
1626 	struct kvm_s390_vm_cpu_machine *mach;
1627 	int ret = 0;
1628 
1629 	mach = kzalloc(sizeof(*mach), GFP_KERNEL_ACCOUNT);
1630 	if (!mach) {
1631 		ret = -ENOMEM;
1632 		goto out;
1633 	}
1634 	get_cpu_id((struct cpuid *) &mach->cpuid);
1635 	mach->ibc = sclp.ibc;
1636 	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
1637 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
1638 	memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,
1639 	       sizeof(stfle_fac_list));
1640 	VM_EVENT(kvm, 3, "GET: host ibc:  0x%4.4x, host cpuid:  0x%16.16llx",
1641 		 kvm->arch.model.ibc,
1642 		 kvm->arch.model.cpuid);
1643 	VM_EVENT(kvm, 3, "GET: host facmask:  0x%16.16llx.%16.16llx.%16.16llx",
1644 		 mach->fac_mask[0],
1645 		 mach->fac_mask[1],
1646 		 mach->fac_mask[2]);
1647 	VM_EVENT(kvm, 3, "GET: host faclist:  0x%16.16llx.%16.16llx.%16.16llx",
1648 		 mach->fac_list[0],
1649 		 mach->fac_list[1],
1650 		 mach->fac_list[2]);
1651 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1652 		ret = -EFAULT;
1653 	kfree(mach);
1654 out:
1655 	return ret;
1656 }
1657 
1658 static int kvm_s390_get_processor_feat(struct kvm *kvm,
1659 				       struct kvm_device_attr *attr)
1660 {
1661 	struct kvm_s390_vm_cpu_feat data;
1662 
1663 	bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
1664 	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1665 		return -EFAULT;
1666 	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1667 			 data.feat[0],
1668 			 data.feat[1],
1669 			 data.feat[2]);
1670 	return 0;
1671 }
1672 
1673 static int kvm_s390_get_machine_feat(struct kvm *kvm,
1674 				     struct kvm_device_attr *attr)
1675 {
1676 	struct kvm_s390_vm_cpu_feat data;
1677 
1678 	bitmap_to_arr64(data.feat, kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
1679 	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1680 		return -EFAULT;
1681 	VM_EVENT(kvm, 3, "GET: host feat:  0x%16.16llx.0x%16.16llx.0x%16.16llx",
1682 			 data.feat[0],
1683 			 data.feat[1],
1684 			 data.feat[2]);
1685 	return 0;
1686 }
1687 
1688 static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1689 					  struct kvm_device_attr *attr)
1690 {
1691 	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
1692 	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
1693 		return -EFAULT;
1694 
1695 	VM_EVENT(kvm, 3, "GET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1696 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1697 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1698 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1699 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1700 	VM_EVENT(kvm, 3, "GET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
1701 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1702 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1703 	VM_EVENT(kvm, 3, "GET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
1704 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1705 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1706 	VM_EVENT(kvm, 3, "GET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
1707 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1708 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1709 	VM_EVENT(kvm, 3, "GET: guest KM     subfunc 0x%16.16lx.%16.16lx",
1710 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1711 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1712 	VM_EVENT(kvm, 3, "GET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
1713 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1714 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1715 	VM_EVENT(kvm, 3, "GET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
1716 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1717 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1718 	VM_EVENT(kvm, 3, "GET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
1719 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1720 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1721 	VM_EVENT(kvm, 3, "GET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
1722 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1723 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1724 	VM_EVENT(kvm, 3, "GET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
1725 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1726 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1727 	VM_EVENT(kvm, 3, "GET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
1728 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1729 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1730 	VM_EVENT(kvm, 3, "GET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
1731 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1732 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1733 	VM_EVENT(kvm, 3, "GET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
1734 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1735 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1736 	VM_EVENT(kvm, 3, "GET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
1737 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1738 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
1739 	VM_EVENT(kvm, 3, "GET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
1740 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1741 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1742 	VM_EVENT(kvm, 3, "GET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1743 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1744 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1745 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1746 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
1747 	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1748 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1749 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1750 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1751 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
1752 
1753 	return 0;
1754 }
1755 
1756 static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
1757 					struct kvm_device_attr *attr)
1758 {
1759 	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
1760 	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
1761 		return -EFAULT;
1762 
1763 	VM_EVENT(kvm, 3, "GET: host  PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1764 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
1765 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
1766 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
1767 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
1768 	VM_EVENT(kvm, 3, "GET: host  PTFF   subfunc 0x%16.16lx.%16.16lx",
1769 		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
1770 		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
1771 	VM_EVENT(kvm, 3, "GET: host  KMAC   subfunc 0x%16.16lx.%16.16lx",
1772 		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
1773 		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
1774 	VM_EVENT(kvm, 3, "GET: host  KMC    subfunc 0x%16.16lx.%16.16lx",
1775 		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
1776 		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
1777 	VM_EVENT(kvm, 3, "GET: host  KM     subfunc 0x%16.16lx.%16.16lx",
1778 		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
1779 		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
1780 	VM_EVENT(kvm, 3, "GET: host  KIMD   subfunc 0x%16.16lx.%16.16lx",
1781 		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
1782 		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
1783 	VM_EVENT(kvm, 3, "GET: host  KLMD   subfunc 0x%16.16lx.%16.16lx",
1784 		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
1785 		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
1786 	VM_EVENT(kvm, 3, "GET: host  PCKMO  subfunc 0x%16.16lx.%16.16lx",
1787 		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
1788 		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
1789 	VM_EVENT(kvm, 3, "GET: host  KMCTR  subfunc 0x%16.16lx.%16.16lx",
1790 		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
1791 		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
1792 	VM_EVENT(kvm, 3, "GET: host  KMF    subfunc 0x%16.16lx.%16.16lx",
1793 		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
1794 		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
1795 	VM_EVENT(kvm, 3, "GET: host  KMO    subfunc 0x%16.16lx.%16.16lx",
1796 		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
1797 		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
1798 	VM_EVENT(kvm, 3, "GET: host  PCC    subfunc 0x%16.16lx.%16.16lx",
1799 		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
1800 		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
1801 	VM_EVENT(kvm, 3, "GET: host  PPNO   subfunc 0x%16.16lx.%16.16lx",
1802 		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
1803 		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
1804 	VM_EVENT(kvm, 3, "GET: host  KMA    subfunc 0x%16.16lx.%16.16lx",
1805 		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
1806 		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
1807 	VM_EVENT(kvm, 3, "GET: host  KDSA   subfunc 0x%16.16lx.%16.16lx",
1808 		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
1809 		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
1810 	VM_EVENT(kvm, 3, "GET: host  SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1811 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
1812 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
1813 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
1814 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
1815 	VM_EVENT(kvm, 3, "GET: host  DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1816 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
1817 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
1818 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
1819 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
1820 
1821 	return 0;
1822 }
1823 
1824 static int kvm_s390_get_processor_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
1825 {
1826 	struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr;
1827 	unsigned long feat = kvm->arch.model.uv_feat_guest.feat;
1828 
1829 	if (put_user(feat, &dst->feat))
1830 		return -EFAULT;
1831 	VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat);
1832 
1833 	return 0;
1834 }
1835 
1836 static int kvm_s390_get_machine_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
1837 {
1838 	struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr;
1839 	unsigned long feat;
1840 
1841 	BUILD_BUG_ON(sizeof(*dst) != sizeof(uv_info.uv_feature_indications));
1842 
1843 	feat = uv_info.uv_feature_indications & KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK;
1844 	if (put_user(feat, &dst->feat))
1845 		return -EFAULT;
1846 	VM_EVENT(kvm, 3, "GET: host UV-feat: 0x%16.16lx", feat);
1847 
1848 	return 0;
1849 }
1850 
1851 static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1852 {
1853 	int ret = -ENXIO;
1854 
1855 	switch (attr->attr) {
1856 	case KVM_S390_VM_CPU_PROCESSOR:
1857 		ret = kvm_s390_get_processor(kvm, attr);
1858 		break;
1859 	case KVM_S390_VM_CPU_MACHINE:
1860 		ret = kvm_s390_get_machine(kvm, attr);
1861 		break;
1862 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1863 		ret = kvm_s390_get_processor_feat(kvm, attr);
1864 		break;
1865 	case KVM_S390_VM_CPU_MACHINE_FEAT:
1866 		ret = kvm_s390_get_machine_feat(kvm, attr);
1867 		break;
1868 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1869 		ret = kvm_s390_get_processor_subfunc(kvm, attr);
1870 		break;
1871 	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1872 		ret = kvm_s390_get_machine_subfunc(kvm, attr);
1873 		break;
1874 	case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
1875 		ret = kvm_s390_get_processor_uv_feat(kvm, attr);
1876 		break;
1877 	case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST:
1878 		ret = kvm_s390_get_machine_uv_feat(kvm, attr);
1879 		break;
1880 	}
1881 	return ret;
1882 }
1883 
1884 /**
1885  * kvm_s390_update_topology_change_report - update CPU topology change report
1886  * @kvm: guest KVM description
1887  * @val: set or clear the MTCR bit
1888  *
1889  * Updates the Multiprocessor Topology-Change-Report bit to signal
1890  * the guest with a topology change.
1891  * This is only relevant if the topology facility is present.
1892  *
1893  * The SCA version, bsca or esca, doesn't matter as the offset is the same.
1894  */
1895 static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val)
1896 {
1897 	union sca_utility new, old;
1898 	struct bsca_block *sca;
1899 
1900 	read_lock(&kvm->arch.sca_lock);
1901 	sca = kvm->arch.sca;
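	/*
	 * Lockless read-modify-write of the SCA utility word: retry the
	 * cmpxchg until no other writer raced with us, so that only the
	 * MTCR bit changes and concurrent updates of other utility bits
	 * are not lost.
	 */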
1902 	do {
1903 		old = READ_ONCE(sca->utility);
1904 		new = old;
1905 		new.mtcr = val;
1906 	} while (cmpxchg(&sca->utility.val, old.val, new.val) != old.val);
1907 	read_unlock(&kvm->arch.sca_lock);
1908 }
1909 
1910 static int kvm_s390_set_topo_change_indication(struct kvm *kvm,
1911 					       struct kvm_device_attr *attr)
1912 {
1913 	if (!test_kvm_facility(kvm, 11))
1914 		return -ENXIO;
1915 
1916 	kvm_s390_update_topology_change_report(kvm, !!attr->attr);
1917 	return 0;
1918 }
1919 
1920 static int kvm_s390_get_topo_change_indication(struct kvm *kvm,
1921 					       struct kvm_device_attr *attr)
1922 {
1923 	u8 topo;
1924 
1925 	if (!test_kvm_facility(kvm, 11))
1926 		return -ENXIO;
1927 
1928 	read_lock(&kvm->arch.sca_lock);
1929 	topo = ((struct bsca_block *)kvm->arch.sca)->utility.mtcr;
1930 	read_unlock(&kvm->arch.sca_lock);
1931 
1932 	return put_user(topo, (u8 __user *)attr->addr);
1933 }
1934 
1935 static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1936 {
1937 	int ret;
1938 
1939 	switch (attr->group) {
1940 	case KVM_S390_VM_MEM_CTRL:
1941 		ret = kvm_s390_set_mem_control(kvm, attr);
1942 		break;
1943 	case KVM_S390_VM_TOD:
1944 		ret = kvm_s390_set_tod(kvm, attr);
1945 		break;
1946 	case KVM_S390_VM_CPU_MODEL:
1947 		ret = kvm_s390_set_cpu_model(kvm, attr);
1948 		break;
1949 	case KVM_S390_VM_CRYPTO:
1950 		ret = kvm_s390_vm_set_crypto(kvm, attr);
1951 		break;
1952 	case KVM_S390_VM_MIGRATION:
1953 		ret = kvm_s390_vm_set_migration(kvm, attr);
1954 		break;
1955 	case KVM_S390_VM_CPU_TOPOLOGY:
1956 		ret = kvm_s390_set_topo_change_indication(kvm, attr);
1957 		break;
1958 	default:
1959 		ret = -ENXIO;
1960 		break;
1961 	}
1962 
1963 	return ret;
1964 }
1965 
1966 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1967 {
1968 	int ret;
1969 
1970 	switch (attr->group) {
1971 	case KVM_S390_VM_MEM_CTRL:
1972 		ret = kvm_s390_get_mem_control(kvm, attr);
1973 		break;
1974 	case KVM_S390_VM_TOD:
1975 		ret = kvm_s390_get_tod(kvm, attr);
1976 		break;
1977 	case KVM_S390_VM_CPU_MODEL:
1978 		ret = kvm_s390_get_cpu_model(kvm, attr);
1979 		break;
1980 	case KVM_S390_VM_MIGRATION:
1981 		ret = kvm_s390_vm_get_migration(kvm, attr);
1982 		break;
1983 	case KVM_S390_VM_CPU_TOPOLOGY:
1984 		ret = kvm_s390_get_topo_change_indication(kvm, attr);
1985 		break;
1986 	default:
1987 		ret = -ENXIO;
1988 		break;
1989 	}
1990 
1991 	return ret;
1992 }
1993 
1994 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1995 {
1996 	int ret;
1997 
1998 	switch (attr->group) {
1999 	case KVM_S390_VM_MEM_CTRL:
2000 		switch (attr->attr) {
2001 		case KVM_S390_VM_MEM_ENABLE_CMMA:
2002 		case KVM_S390_VM_MEM_CLR_CMMA:
2003 			ret = sclp.has_cmma ? 0 : -ENXIO;
2004 			break;
2005 		case KVM_S390_VM_MEM_LIMIT_SIZE:
2006 			ret = 0;
2007 			break;
2008 		default:
2009 			ret = -ENXIO;
2010 			break;
2011 		}
2012 		break;
2013 	case KVM_S390_VM_TOD:
2014 		switch (attr->attr) {
2015 		case KVM_S390_VM_TOD_LOW:
2016 		case KVM_S390_VM_TOD_HIGH:
2017 			ret = 0;
2018 			break;
2019 		default:
2020 			ret = -ENXIO;
2021 			break;
2022 		}
2023 		break;
2024 	case KVM_S390_VM_CPU_MODEL:
2025 		switch (attr->attr) {
2026 		case KVM_S390_VM_CPU_PROCESSOR:
2027 		case KVM_S390_VM_CPU_MACHINE:
2028 		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
2029 		case KVM_S390_VM_CPU_MACHINE_FEAT:
2030 		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
2031 		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
2032 		case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST:
2033 		case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
2034 			ret = 0;
2035 			break;
2036 		default:
2037 			ret = -ENXIO;
2038 			break;
2039 		}
2040 		break;
2041 	case KVM_S390_VM_CRYPTO:
2042 		switch (attr->attr) {
2043 		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
2044 		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
2045 		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
2046 		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
2047 			ret = 0;
2048 			break;
2049 		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
2050 		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
2051 			ret = ap_instructions_available() ? 0 : -ENXIO;
2052 			break;
2053 		default:
2054 			ret = -ENXIO;
2055 			break;
2056 		}
2057 		break;
2058 	case KVM_S390_VM_MIGRATION:
2059 		ret = 0;
2060 		break;
2061 	case KVM_S390_VM_CPU_TOPOLOGY:
2062 		ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO;
2063 		break;
2064 	default:
2065 		ret = -ENXIO;
2066 		break;
2067 	}
2068 
2069 	return ret;
2070 }
2071 
2072 static int kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
2073 {
2074 	uint8_t *keys;
2075 	uint64_t hva;
2076 	int srcu_idx, i, r = 0;
2077 
2078 	if (args->flags != 0)
2079 		return -EINVAL;
2080 
2081 	/* Is this guest using storage keys? */
2082 	if (!mm_uses_skeys(current->mm))
2083 		return KVM_S390_GET_SKEYS_NONE;
2084 
2085 	/* Enforce sane limit on memory allocation */
2086 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
2087 		return -EINVAL;
2088 
2089 	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
2090 	if (!keys)
2091 		return -ENOMEM;
2092 
2093 	mmap_read_lock(current->mm);
2094 	srcu_idx = srcu_read_lock(&kvm->srcu);
2095 	for (i = 0; i < args->count; i++) {
2096 		hva = gfn_to_hva(kvm, args->start_gfn + i);
2097 		if (kvm_is_error_hva(hva)) {
2098 			r = -EFAULT;
2099 			break;
2100 		}
2101 
2102 		r = get_guest_storage_key(current->mm, hva, &keys[i]);
2103 		if (r)
2104 			break;
2105 	}
2106 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2107 	mmap_read_unlock(current->mm);
2108 
2109 	if (!r) {
2110 		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
2111 				 sizeof(uint8_t) * args->count);
2112 		if (r)
2113 			r = -EFAULT;
2114 	}
2115 
2116 	kvfree(keys);
2117 	return r;
2118 }
2119 
2120 static int kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
2121 {
2122 	uint8_t *keys;
2123 	uint64_t hva;
2124 	int srcu_idx, i, r = 0;
2125 	bool unlocked;
2126 
2127 	if (args->flags != 0)
2128 		return -EINVAL;
2129 
2130 	/* Enforce sane limit on memory allocation */
2131 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
2132 		return -EINVAL;
2133 
2134 	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
2135 	if (!keys)
2136 		return -ENOMEM;
2137 
2138 	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
2139 			   sizeof(uint8_t) * args->count);
2140 	if (r) {
2141 		r = -EFAULT;
2142 		goto out;
2143 	}
2144 
2145 	/* Enable storage key handling for the guest */
2146 	r = s390_enable_skey();
2147 	if (r)
2148 		goto out;
2149 
2150 	i = 0;
2151 	mmap_read_lock(current->mm);
2152 	srcu_idx = srcu_read_lock(&kvm->srcu);
2153 	while (i < args->count) {
2154 		unlocked = false;
2155 		hva = gfn_to_hva(kvm, args->start_gfn + i);
2156 		if (kvm_is_error_hva(hva)) {
2157 			r = -EFAULT;
2158 			break;
2159 		}
2160 
2161 		/* Lowest order bit is reserved */
2162 		if (keys[i] & 0x01) {
2163 			r = -EINVAL;
2164 			break;
2165 		}
2166 
2167 		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
2168 		if (r) {
2169 			r = fixup_user_fault(current->mm, hva,
2170 					     FAULT_FLAG_WRITE, &unlocked);
2171 			if (r)
2172 				break;
2173 		}
2174 		if (!r)
2175 			i++;
2176 	}
2177 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2178 	mmap_read_unlock(current->mm);
2179 out:
2180 	kvfree(keys);
2181 	return r;
2182 }
2183 
2184 /*
2185  * Base address and length must be sent at the start of each block, therefore
2186  * it's cheaper to send some clean data, as long as it's less than the size of
2187  * two longs.
2188  */
2189 #define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
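/*
 * On a 64-bit host this evaluates to 16: a run of up to 16 clean one-byte
 * values is still cheaper to transmit inline than the two longs of
 * metadata (base address and length) that starting a new block would cost.
 */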
2190 /* for consistency */
2191 #define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
2192 
2193 static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2194 			      u8 *res, unsigned long bufsize)
2195 {
2196 	unsigned long pgstev, hva, cur_gfn = args->start_gfn;
2197 
2198 	args->count = 0;
2199 	while (args->count < bufsize) {
2200 		hva = gfn_to_hva(kvm, cur_gfn);
2201 		/*
2202 		 * We return an error if the first value was invalid, but we
2203 		 * return successfully if at least one value was copied.
2204 		 */
2205 		if (kvm_is_error_hva(hva))
2206 			return args->count ? 0 : -EFAULT;
2207 		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2208 			pgstev = 0;
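		/*
		 * Keep only the bits that make up the CMMA attributes:
		 * 0x03 is the usage state, 0x40 the NODAT bit of the
		 * guest page state.
		 */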
2209 		res[args->count++] = (pgstev >> 24) & 0x43;
2210 		cur_gfn++;
2211 	}
2212 
2213 	return 0;
2214 }
2215 
2216 static struct kvm_memory_slot *gfn_to_memslot_approx(struct kvm_memslots *slots,
2217 						     gfn_t gfn)
2218 {
2219 	return ____gfn_to_memslot(slots, gfn, true);
2220 }
2221 
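/*
 * Find the guest frame number of the next page whose CMMA dirty bit is
 * set, starting the search at cur_gfn (wrapping around to the first
 * memslot if cur_gfn lies above the last one). If no dirty bit is found,
 * the returned gfn lies past the end of the last memslot searched.
 */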
2222 static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
2223 					      unsigned long cur_gfn)
2224 {
2225 	struct kvm_memory_slot *ms = gfn_to_memslot_approx(slots, cur_gfn);
2226 	unsigned long ofs = cur_gfn - ms->base_gfn;
2227 	struct rb_node *mnode = &ms->gfn_node[slots->node_idx];
2228 
2229 	if (ms->base_gfn + ms->npages <= cur_gfn) {
2230 		mnode = rb_next(mnode);
2231 		/* If we are above the highest slot, wrap around */
2232 		if (!mnode)
2233 			mnode = rb_first(&slots->gfn_tree);
2234 
2235 		ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
2236 		ofs = 0;
2237 	}
2238 
2239 	if (cur_gfn < ms->base_gfn)
2240 		ofs = 0;
2241 
2242 	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
2243 	while (ofs >= ms->npages && (mnode = rb_next(mnode))) {
2244 		ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
2245 		ofs = find_first_bit(kvm_second_dirty_bitmap(ms), ms->npages);
2246 	}
2247 	return ms->base_gfn + ofs;
2248 }
2249 
2250 static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2251 			     u8 *res, unsigned long bufsize)
2252 {
2253 	unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
2254 	struct kvm_memslots *slots = kvm_memslots(kvm);
2255 	struct kvm_memory_slot *ms;
2256 
2257 	if (unlikely(kvm_memslots_empty(slots)))
2258 		return 0;
2259 
2260 	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
2261 	ms = gfn_to_memslot(kvm, cur_gfn);
2262 	args->count = 0;
2263 	args->start_gfn = cur_gfn;
2264 	if (!ms)
2265 		return 0;
2266 	next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2267 	mem_end = kvm_s390_get_gfn_end(slots);
2268 
2269 	while (args->count < bufsize) {
2270 		hva = gfn_to_hva(kvm, cur_gfn);
2271 		if (kvm_is_error_hva(hva))
2272 			return 0;
2273 		/* Decrement only if we actually flipped the bit to 0 */
2274 		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
2275 			atomic64_dec(&kvm->arch.cmma_dirty_pages);
2276 		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2277 			pgstev = 0;
2278 		/* Save the value */
2279 		res[args->count++] = (pgstev >> 24) & 0x43;
2280 		/* If the next bit is too far away, stop. */
2281 		if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
2282 			return 0;
2283 		/* If we reached the previous "next", find the next one */
2284 		if (cur_gfn == next_gfn)
2285 			next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2286 		/* Reached the end of memory or of the buffer, stop */
2287 		if ((next_gfn >= mem_end) ||
2288 		    (next_gfn - args->start_gfn >= bufsize))
2289 			return 0;
2290 		cur_gfn++;
2291 		/* Reached the end of the current memslot, take the next one. */
2292 		if (cur_gfn - ms->base_gfn >= ms->npages) {
2293 			ms = gfn_to_memslot(kvm, cur_gfn);
2294 			if (!ms)
2295 				return 0;
2296 		}
2297 	}
2298 	return 0;
2299 }
2300 
2301 /*
2302  * This function searches for the next page with dirty CMMA attributes, and
2303  * saves the attributes in the buffer up to either the end of the buffer or
2304  * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
2305  * no trailing clean bytes are saved.
2306  * In case no dirty bits were found, or if CMMA was not enabled or used, the
2307  * output buffer will indicate 0 as length.
2308  */
2309 static int kvm_s390_get_cmma_bits(struct kvm *kvm,
2310 				  struct kvm_s390_cmma_log *args)
2311 {
2312 	unsigned long bufsize;
2313 	int srcu_idx, peek, ret;
2314 	u8 *values;
2315 
2316 	if (!kvm->arch.use_cmma)
2317 		return -ENXIO;
2318 	/* Invalid/unsupported flags were specified */
2319 	if (args->flags & ~KVM_S390_CMMA_PEEK)
2320 		return -EINVAL;
2321 	/* Migration mode query, and we are not doing a migration */
2322 	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
2323 	if (!peek && !kvm->arch.migration_mode)
2324 		return -EINVAL;
2325 	/* CMMA is disabled or was not used, or the buffer has length zero */
2326 	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
2327 	if (!bufsize || !kvm->mm->context.uses_cmm) {
2328 		memset(args, 0, sizeof(*args));
2329 		return 0;
2330 	}
2331 	/* We are not peeking, and there are no dirty pages */
2332 	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
2333 		memset(args, 0, sizeof(*args));
2334 		return 0;
2335 	}
2336 
2337 	values = vmalloc(bufsize);
2338 	if (!values)
2339 		return -ENOMEM;
2340 
2341 	mmap_read_lock(kvm->mm);
2342 	srcu_idx = srcu_read_lock(&kvm->srcu);
2343 	if (peek)
2344 		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
2345 	else
2346 		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
2347 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2348 	mmap_read_unlock(kvm->mm);
2349 
2350 	if (kvm->arch.migration_mode)
2351 		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2352 	else
2353 		args->remaining = 0;
2354 
2355 	if (copy_to_user((void __user *)args->values, values, args->count))
2356 		ret = -EFAULT;
2357 
2358 	vfree(values);
2359 	return ret;
2360 }
2361 
2362 /*
2363  * This function sets the CMMA attributes for the given pages. If the input
2364  * buffer has zero length, no action is taken, otherwise the attributes are
2365  * set and the mm->context.uses_cmm flag is set.
2366  */
2367 static int kvm_s390_set_cmma_bits(struct kvm *kvm,
2368 				  const struct kvm_s390_cmma_log *args)
2369 {
2370 	unsigned long hva, mask, pgstev, i;
2371 	uint8_t *bits;
2372 	int srcu_idx, r = 0;
2373 
2374 	mask = args->mask;
2375 
2376 	if (!kvm->arch.use_cmma)
2377 		return -ENXIO;
2378 	/* invalid/unsupported flags */
2379 	if (args->flags != 0)
2380 		return -EINVAL;
2381 	/* Enforce sane limit on memory allocation */
2382 	if (args->count > KVM_S390_CMMA_SIZE_MAX)
2383 		return -EINVAL;
2384 	/* Nothing to do */
2385 	if (args->count == 0)
2386 		return 0;
2387 
2388 	bits = vmalloc(array_size(sizeof(*bits), args->count));
2389 	if (!bits)
2390 		return -ENOMEM;
2391 
2392 	r = copy_from_user(bits, (void __user *)args->values, args->count);
2393 	if (r) {
2394 		r = -EFAULT;
2395 		goto out;
2396 	}
2397 
2398 	mmap_read_lock(kvm->mm);
2399 	srcu_idx = srcu_read_lock(&kvm->srcu);
2400 	for (i = 0; i < args->count; i++) {
2401 		hva = gfn_to_hva(kvm, args->start_gfn + i);
2402 		if (kvm_is_error_hva(hva)) {
2403 			r = -EFAULT;
2404 			break;
2405 		}
2406 
2407 		pgstev = bits[i];
2408 		pgstev = pgstev << 24;
2409 		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
2410 		set_pgste_bits(kvm->mm, hva, mask, pgstev);
2411 	}
2412 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2413 	mmap_read_unlock(kvm->mm);
2414 
2415 	if (!kvm->mm->context.uses_cmm) {
2416 		mmap_write_lock(kvm->mm);
2417 		kvm->mm->context.uses_cmm = 1;
2418 		mmap_write_unlock(kvm->mm);
2419 	}
2420 out:
2421 	vfree(bits);
2422 	return r;
2423 }
2424 
2425 /**
2426  * kvm_s390_cpus_from_pv - Convert all protected vCPUs in a protected VM to
2427  * non protected.
2428  * @kvm: the VM whose protected vCPUs are to be converted
2429  * @rc: return value for the RC field of the UVC (in case of error)
2430  * @rrc: return value for the RRC field of the UVC (in case of error)
2431  *
2432  * Does not stop in case of error, tries to convert as many
2433  * CPUs as possible. In case of error, the RC and RRC of the last error are
2434  * returned.
2435  *
2436  * Return: 0 in case of success, otherwise -EIO
2437  */
2438 int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
2439 {
2440 	struct kvm_vcpu *vcpu;
2441 	unsigned long i;
2442 	u16 _rc, _rrc;
2443 	int ret = 0;
2444 
2445 	/*
2446 	 * We ignore failures and try to destroy as many CPUs as possible.
2447 	 * At the same time we must not free the assigned resources when
2448 	 * this fails, as the ultravisor still has access to that memory.
2449 	 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
2450 	 * behind.
2451 	 * We want to return the first failure rc and rrc, though.
2452 	 */
2453 	kvm_for_each_vcpu(i, vcpu, kvm) {
2454 		mutex_lock(&vcpu->mutex);
2455 		if (kvm_s390_pv_destroy_cpu(vcpu, &_rc, &_rrc) && !ret) {
2456 			*rc = _rc;
2457 			*rrc = _rrc;
2458 			ret = -EIO;
2459 		}
2460 		mutex_unlock(&vcpu->mutex);
2461 	}
2462 	/* Ensure that we re-enable gisa if the non-PV guest used it but the PV guest did not. */
2463 	if (use_gisa)
2464 		kvm_s390_gisa_enable(kvm);
2465 	return ret;
2466 }
2467 
2468 /**
2469  * kvm_s390_cpus_to_pv - Convert all non-protected vCPUs in a protected VM
2470  * to protected.
2471  * @kvm: the VM whose protected vCPUs are to be converted
2472  * @rc: return value for the RC field of the UVC (in case of error)
2473  * @rrc: return value for the RRC field of the UVC (in case of error)
2474  *
2475  * Tries to undo the conversion in case of error.
2476  *
2477  * Return: 0 in case of success, otherwise -EIO
2478  */
2479 static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
2480 {
2481 	unsigned long i;
2482 	int r = 0;
2483 	u16 dummy;
2484 
2485 	struct kvm_vcpu *vcpu;
2486 
2487 	/* Disable the GISA if the ultravisor does not support AIV. */
2488 	if (!uv_has_feature(BIT_UV_FEAT_AIV))
2489 		kvm_s390_gisa_disable(kvm);
2490 
2491 	kvm_for_each_vcpu(i, vcpu, kvm) {
2492 		mutex_lock(&vcpu->mutex);
2493 		r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
2494 		mutex_unlock(&vcpu->mutex);
2495 		if (r)
2496 			break;
2497 	}
2498 	if (r)
2499 		kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
2500 	return r;
2501 }
2502 
2503 /*
2504  * Here we provide user space with a direct interface to query
2505  * UV-related data like UV maxima and available features as well as
2506  * feature-specific data.
2507  *
2508  * To facilitate future extension of the data structures we'll try to
2509  * write data up to the maximum requested length.
2510  */
2511 static ssize_t kvm_s390_handle_pv_info(struct kvm_s390_pv_info *info)
2512 {
2513 	ssize_t len_min;
2514 
2515 	switch (info->header.id) {
2516 	case KVM_PV_INFO_VM: {
2517 		len_min = sizeof(info->header) + sizeof(info->vm);
2518 
2519 		if (info->header.len_max < len_min)
2520 			return -EINVAL;
2521 
2522 		memcpy(info->vm.inst_calls_list,
2523 		       uv_info.inst_calls_list,
2524 		       sizeof(uv_info.inst_calls_list));
2525 
2526 		/* It's the max cpuid, not the max number of cpus, so it's off by one */
2527 		info->vm.max_cpus = uv_info.max_guest_cpu_id + 1;
2528 		info->vm.max_guests = uv_info.max_num_sec_conf;
2529 		info->vm.max_guest_addr = uv_info.max_sec_stor_addr;
2530 		info->vm.feature_indication = uv_info.uv_feature_indications;
2531 
2532 		return len_min;
2533 	}
2534 	case KVM_PV_INFO_DUMP: {
2535 		len_min = sizeof(info->header) + sizeof(info->dump);
2536 
2537 		if (info->header.len_max < len_min)
2538 			return -EINVAL;
2539 
2540 		info->dump.dump_cpu_buffer_len = uv_info.guest_cpu_stor_len;
2541 		info->dump.dump_config_mem_buffer_per_1m = uv_info.conf_dump_storage_state_len;
2542 		info->dump.dump_config_finalize_len = uv_info.conf_dump_finalize_len;
2543 		return len_min;
2544 	}
2545 	default:
2546 		return -EINVAL;
2547 	}
2548 }
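/*
 * A minimal sketch of the user space side of this contract (illustrative
 * only; error handling and fd setup are omitted): the caller announces
 * via len_max how much it can receive and afterwards consumes no more
 * than len_written bytes.
 *
 *	struct kvm_s390_pv_info info = {
 *		.header.id = KVM_PV_INFO_VM,
 *		.header.len_max = sizeof(info.header) + sizeof(info.vm),
 *	};
 *	struct kvm_pv_cmd cmd = {
 *		.cmd = KVM_PV_INFO,
 *		.data = (__u64)(unsigned long)&info,
 *	};
 *
 *	ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);
 *	// trust only the first info.header.len_written bytes of info
 */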
2549 
2550 static int kvm_s390_pv_dmp(struct kvm *kvm, struct kvm_pv_cmd *cmd,
2551 			   struct kvm_s390_pv_dmp dmp)
2552 {
2553 	int r = -EINVAL;
2554 	void __user *result_buff = (void __user *)dmp.buff_addr;
2555 
2556 	switch (dmp.subcmd) {
2557 	case KVM_PV_DUMP_INIT: {
2558 		if (kvm->arch.pv.dumping)
2559 			break;
2560 
2561 		/*
2562 		 * Block SIE entry as concurrent dump UVCs could lead
2563 		 * to validity intercepts.
2564 		 */
2565 		kvm_s390_vcpu_block_all(kvm);
2566 
2567 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2568 				  UVC_CMD_DUMP_INIT, &cmd->rc, &cmd->rrc);
2569 		KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP INIT: rc %x rrc %x",
2570 			     cmd->rc, cmd->rrc);
2571 		if (!r) {
2572 			kvm->arch.pv.dumping = true;
2573 		} else {
2574 			kvm_s390_vcpu_unblock_all(kvm);
2575 			r = -EINVAL;
2576 		}
2577 		break;
2578 	}
2579 	case KVM_PV_DUMP_CONFIG_STOR_STATE: {
2580 		if (!kvm->arch.pv.dumping)
2581 			break;
2582 
2583 		/*
2584 		 * gaddr is an output parameter since we might stop
2585 		 * early. As dmp will be copied back by our caller, we
2586 		 * don't need to do it ourselves.
2587 		 */
2588 		r = kvm_s390_pv_dump_stor_state(kvm, result_buff, &dmp.gaddr, dmp.buff_len,
2589 						&cmd->rc, &cmd->rrc);
2590 		break;
2591 	}
2592 	case KVM_PV_DUMP_COMPLETE: {
2593 		if (!kvm->arch.pv.dumping)
2594 			break;
2595 
2596 		r = -EINVAL;
2597 		if (dmp.buff_len < uv_info.conf_dump_finalize_len)
2598 			break;
2599 
2600 		r = kvm_s390_pv_dump_complete(kvm, result_buff,
2601 					      &cmd->rc, &cmd->rrc);
2602 		break;
2603 	}
2604 	default:
2605 		r = -ENOTTY;
2606 		break;
2607 	}
2608 
2609 	return r;
2610 }
2611 
2612 static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
2613 {
2614 	const bool need_lock = (cmd->cmd != KVM_PV_ASYNC_CLEANUP_PERFORM);
2615 	void __user *argp = (void __user *)cmd->data;
2616 	int r = 0;
2617 	u16 dummy;
2618 
2619 	if (need_lock)
2620 		mutex_lock(&kvm->lock);
2621 
2622 	switch (cmd->cmd) {
2623 	case KVM_PV_ENABLE: {
2624 		r = -EINVAL;
2625 		if (kvm_s390_pv_is_protected(kvm))
2626 			break;
2627 
2628 		/*
2629 		 * FMT 4 SIE needs esca. As we never switch back to bsca from
2630 		 * esca, we need no cleanup in the error cases below.
2631 		 */
2632 		r = sca_switch_to_extended(kvm);
2633 		if (r)
2634 			break;
2635 
2636 		r = s390_disable_cow_sharing();
2637 		if (r)
2638 			break;
2639 
2640 		r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
2641 		if (r)
2642 			break;
2643 
2644 		r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
2645 		if (r)
2646 			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
2647 
2648 		/* we need to block service interrupts from now on */
2649 		set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2650 		break;
2651 	}
2652 	case KVM_PV_ASYNC_CLEANUP_PREPARE:
2653 		r = -EINVAL;
2654 		if (!kvm_s390_pv_is_protected(kvm) || !async_destroy)
2655 			break;
2656 
2657 		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
2658 		/*
2659 		 * If a CPU could not be destroyed, destroy VM will also fail.
2660 		 * There is no point in trying to destroy it. Instead return
2661 		 * the rc and rrc from the first CPU that failed destroying.
2662 		 */
2663 		if (r)
2664 			break;
2665 		r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc);
2666 
2667 		/* no need to block service interrupts any more */
2668 		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2669 		break;
2670 	case KVM_PV_ASYNC_CLEANUP_PERFORM:
2671 		r = -EINVAL;
2672 		if (!async_destroy)
2673 			break;
2674 		/* kvm->lock must not be held; this is asserted inside the function. */
2675 		r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc);
2676 		break;
2677 	case KVM_PV_DISABLE: {
2678 		r = -EINVAL;
2679 		if (!kvm_s390_pv_is_protected(kvm))
2680 			break;
2681 
2682 		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
2683 		/*
2684 		 * If a CPU could not be destroyed, destroy VM will also fail.
2685 		 * There is no point in trying to destroy it. Instead return
2686 		 * the rc and rrc from the first CPU that failed destroying.
2687 		 */
2688 		if (r)
2689 			break;
2690 		r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc);
2691 
2692 		/* no need to block service interrupts any more */
2693 		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2694 		break;
2695 	}
2696 	case KVM_PV_SET_SEC_PARMS: {
2697 		struct kvm_s390_pv_sec_parm parms = {};
2698 		void *hdr;
2699 
2700 		r = -EINVAL;
2701 		if (!kvm_s390_pv_is_protected(kvm))
2702 			break;
2703 
2704 		r = -EFAULT;
2705 		if (copy_from_user(&parms, argp, sizeof(parms)))
2706 			break;
2707 
2708 		/* Currently restricted to 8KB */
2709 		r = -EINVAL;
2710 		if (parms.length > PAGE_SIZE * 2)
2711 			break;
2712 
2713 		r = -ENOMEM;
2714 		hdr = vmalloc(parms.length);
2715 		if (!hdr)
2716 			break;
2717 
2718 		r = -EFAULT;
2719 		if (!copy_from_user(hdr, (void __user *)parms.origin,
2720 				    parms.length))
2721 			r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
2722 						      &cmd->rc, &cmd->rrc);
2723 
2724 		vfree(hdr);
2725 		break;
2726 	}
2727 	case KVM_PV_UNPACK: {
2728 		struct kvm_s390_pv_unp unp = {};
2729 
2730 		r = -EINVAL;
2731 		if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
2732 			break;
2733 
2734 		r = -EFAULT;
2735 		if (copy_from_user(&unp, argp, sizeof(unp)))
2736 			break;
2737 
2738 		r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
2739 				       &cmd->rc, &cmd->rrc);
2740 		break;
2741 	}
2742 	case KVM_PV_VERIFY: {
2743 		r = -EINVAL;
2744 		if (!kvm_s390_pv_is_protected(kvm))
2745 			break;
2746 
2747 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2748 				  UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
2749 		KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
2750 			     cmd->rrc);
2751 		break;
2752 	}
2753 	case KVM_PV_PREP_RESET: {
2754 		r = -EINVAL;
2755 		if (!kvm_s390_pv_is_protected(kvm))
2756 			break;
2757 
2758 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2759 				  UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
2760 		KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
2761 			     cmd->rc, cmd->rrc);
2762 		break;
2763 	}
2764 	case KVM_PV_UNSHARE_ALL: {
2765 		r = -EINVAL;
2766 		if (!kvm_s390_pv_is_protected(kvm))
2767 			break;
2768 
2769 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2770 				  UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
2771 		KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
2772 			     cmd->rc, cmd->rrc);
2773 		break;
2774 	}
2775 	case KVM_PV_INFO: {
2776 		struct kvm_s390_pv_info info = {};
2777 		ssize_t data_len;
2778 
2779 		/*
2780 		 * No need to check the VM protection here.
2781 		 *
2782 		 * Maybe user space wants to query some of the data
2783 		 * when the VM is still unprotected. If we see the
2784 		 * need to fence a new data command we can still
2785 		 * return an error in the info handler.
2786 		 */
2787 
2788 		r = -EFAULT;
2789 		if (copy_from_user(&info, argp, sizeof(info.header)))
2790 			break;
2791 
2792 		r = -EINVAL;
2793 		if (info.header.len_max < sizeof(info.header))
2794 			break;
2795 
2796 		data_len = kvm_s390_handle_pv_info(&info);
2797 		if (data_len < 0) {
2798 			r = data_len;
2799 			break;
2800 		}
2801 		/*
2802 		 * If a data command struct is extended (multiple
2803 		 * times) this can be used to determine how much of it
2804 		 * is valid.
2805 		 */
2806 		info.header.len_written = data_len;
2807 
2808 		r = -EFAULT;
2809 		if (copy_to_user(argp, &info, data_len))
2810 			break;
2811 
2812 		r = 0;
2813 		break;
2814 	}
2815 	case KVM_PV_DUMP: {
2816 		struct kvm_s390_pv_dmp dmp;
2817 
2818 		r = -EINVAL;
2819 		if (!kvm_s390_pv_is_protected(kvm))
2820 			break;
2821 
2822 		r = -EFAULT;
2823 		if (copy_from_user(&dmp, argp, sizeof(dmp)))
2824 			break;
2825 
2826 		r = kvm_s390_pv_dmp(kvm, cmd, dmp);
2827 		if (r)
2828 			break;
2829 
2830 		if (copy_to_user(argp, &dmp, sizeof(dmp))) {
2831 			r = -EFAULT;
2832 			break;
2833 		}
2834 
2835 		break;
2836 	}
2837 	default:
2838 		r = -ENOTTY;
2839 	}
2840 	if (need_lock)
2841 		mutex_unlock(&kvm->lock);
2842 
2843 	return r;
2844 }
2845 
2846 static int mem_op_validate_common(struct kvm_s390_mem_op *mop, u64 supported_flags)
2847 {
2848 	if (mop->flags & ~supported_flags || !mop->size)
2849 		return -EINVAL;
2850 	if (mop->size > MEM_OP_MAX_SIZE)
2851 		return -E2BIG;
2852 	if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) {
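		/* storage access keys are only 4 bits wide */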
2853 		if (mop->key > 0xf)
2854 			return -EINVAL;
2855 	} else {
2856 		mop->key = 0;
2857 	}
2858 	return 0;
2859 }
2860 
2861 static int kvm_s390_vm_mem_op_abs(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2862 {
2863 	void __user *uaddr = (void __user *)mop->buf;
2864 	enum gacc_mode acc_mode;
2865 	void *tmpbuf = NULL;
2866 	int r, srcu_idx;
2867 
2868 	r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION |
2869 					KVM_S390_MEMOP_F_CHECK_ONLY);
2870 	if (r)
2871 		return r;
2872 
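	/* A check-only access transfers no data, so no bounce buffer is needed. */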
2873 	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2874 		tmpbuf = vmalloc(mop->size);
2875 		if (!tmpbuf)
2876 			return -ENOMEM;
2877 	}
2878 
2879 	srcu_idx = srcu_read_lock(&kvm->srcu);
2880 
2881 	if (kvm_is_error_gpa(kvm, mop->gaddr)) {
2882 		r = PGM_ADDRESSING;
2883 		goto out_unlock;
2884 	}
2885 
2886 	acc_mode = mop->op == KVM_S390_MEMOP_ABSOLUTE_READ ? GACC_FETCH : GACC_STORE;
2887 	if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2888 		r = check_gpa_range(kvm, mop->gaddr, mop->size, acc_mode, mop->key);
2889 		goto out_unlock;
2890 	}
2891 	if (acc_mode == GACC_FETCH) {
2892 		r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
2893 					      mop->size, GACC_FETCH, mop->key);
2894 		if (r)
2895 			goto out_unlock;
2896 		if (copy_to_user(uaddr, tmpbuf, mop->size))
2897 			r = -EFAULT;
2898 	} else {
2899 		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2900 			r = -EFAULT;
2901 			goto out_unlock;
2902 		}
2903 		r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
2904 					      mop->size, GACC_STORE, mop->key);
2905 	}
2906 
2907 out_unlock:
2908 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2909 
2910 	vfree(tmpbuf);
2911 	return r;
2912 }
2913 
2914 static int kvm_s390_vm_mem_op_cmpxchg(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2915 {
2916 	void __user *uaddr = (void __user *)mop->buf;
2917 	void __user *old_addr = (void __user *)mop->old_addr;
2918 	union {
2919 		__uint128_t quad;
2920 		char raw[sizeof(__uint128_t)];
2921 	} old = { .quad = 0}, new = { .quad = 0 };
2922 	unsigned int off_in_quad = sizeof(new) - mop->size;
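	/*
	 * The value is right-aligned within the 16 byte quad: on big-endian
	 * s390, the last mop->size bytes are the low order bytes of the
	 * __uint128_t that cmpxchg_guest_abs_with_key operates on.
	 */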
2923 	int r, srcu_idx;
2924 	bool success;
2925 
2926 	r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION);
2927 	if (r)
2928 		return r;
2929 	/*
2930 	 * This validates off_in_quad. Checking that size is a power
2931 	 * of two is not necessary, as cmpxchg_guest_abs_with_key
2932 	 * takes care of that.
2933 	 */
2934 	if (mop->size > sizeof(new))
2935 		return -EINVAL;
2936 	if (copy_from_user(&new.raw[off_in_quad], uaddr, mop->size))
2937 		return -EFAULT;
2938 	if (copy_from_user(&old.raw[off_in_quad], old_addr, mop->size))
2939 		return -EFAULT;
2940 
2941 	srcu_idx = srcu_read_lock(&kvm->srcu);
2942 
2943 	if (kvm_is_error_gpa(kvm, mop->gaddr)) {
2944 		r = PGM_ADDRESSING;
2945 		goto out_unlock;
2946 	}
2947 
2948 	r = cmpxchg_guest_abs_with_key(kvm, mop->gaddr, mop->size, &old.quad,
2949 				       new.quad, mop->key, &success);
2950 	if (!success && copy_to_user(old_addr, &old.raw[off_in_quad], mop->size))
2951 		r = -EFAULT;
2952 
2953 out_unlock:
2954 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2955 	return r;
2956 }
2957 
2958 static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2959 {
2960 	/*
2961 	 * This is technically a heuristic only; if the kvm->lock is not
2962 	 * taken, it is not guaranteed that the vm is/remains non-protected.
2963 	 * This is ok from a kernel perspective, wrongdoing is detected
2964 	 * on the access, -EFAULT is returned and the vm may crash the
2965 	 * next time it accesses the memory in question.
2966 	 * There is no sane use case to do switching and a memop on two
2967 	 * different CPUs at the same time.
2968 	 */
2969 	if (kvm_s390_pv_get_handle(kvm))
2970 		return -EINVAL;
2971 
2972 	switch (mop->op) {
2973 	case KVM_S390_MEMOP_ABSOLUTE_READ:
2974 	case KVM_S390_MEMOP_ABSOLUTE_WRITE:
2975 		return kvm_s390_vm_mem_op_abs(kvm, mop);
2976 	case KVM_S390_MEMOP_ABSOLUTE_CMPXCHG:
2977 		return kvm_s390_vm_mem_op_cmpxchg(kvm, mop);
2978 	default:
2979 		return -EINVAL;
2980 	}
2981 }
2982 
2983 int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
2984 {
2985 	struct kvm *kvm = filp->private_data;
2986 	void __user *argp = (void __user *)arg;
2987 	struct kvm_device_attr attr;
2988 	int r;
2989 
2990 	switch (ioctl) {
2991 	case KVM_S390_INTERRUPT: {
2992 		struct kvm_s390_interrupt s390int;
2993 
2994 		r = -EFAULT;
2995 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
2996 			break;
2997 		r = kvm_s390_inject_vm(kvm, &s390int);
2998 		break;
2999 	}
3000 	case KVM_CREATE_IRQCHIP: {
3001 		struct kvm_irq_routing_entry routing;
3002 
3003 		r = -EINVAL;
3004 		if (kvm->arch.use_irqchip) {
3005 			/* Set up dummy routing. */
3006 			memset(&routing, 0, sizeof(routing));
3007 			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
3008 		}
3009 		break;
3010 	}
3011 	case KVM_SET_DEVICE_ATTR: {
3012 		r = -EFAULT;
3013 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
3014 			break;
3015 		r = kvm_s390_vm_set_attr(kvm, &attr);
3016 		break;
3017 	}
3018 	case KVM_GET_DEVICE_ATTR: {
3019 		r = -EFAULT;
3020 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
3021 			break;
3022 		r = kvm_s390_vm_get_attr(kvm, &attr);
3023 		break;
3024 	}
3025 	case KVM_HAS_DEVICE_ATTR: {
3026 		r = -EFAULT;
3027 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
3028 			break;
3029 		r = kvm_s390_vm_has_attr(kvm, &attr);
3030 		break;
3031 	}
3032 	case KVM_S390_GET_SKEYS: {
3033 		struct kvm_s390_skeys args;
3034 
3035 		r = -EFAULT;
3036 		if (copy_from_user(&args, argp,
3037 				   sizeof(struct kvm_s390_skeys)))
3038 			break;
3039 		r = kvm_s390_get_skeys(kvm, &args);
3040 		break;
3041 	}
3042 	case KVM_S390_SET_SKEYS: {
3043 		struct kvm_s390_skeys args;
3044 
3045 		r = -EFAULT;
3046 		if (copy_from_user(&args, argp,
3047 				   sizeof(struct kvm_s390_skeys)))
3048 			break;
3049 		r = kvm_s390_set_skeys(kvm, &args);
3050 		break;
3051 	}
3052 	case KVM_S390_GET_CMMA_BITS: {
3053 		struct kvm_s390_cmma_log args;
3054 
3055 		r = -EFAULT;
3056 		if (copy_from_user(&args, argp, sizeof(args)))
3057 			break;
3058 		mutex_lock(&kvm->slots_lock);
3059 		r = kvm_s390_get_cmma_bits(kvm, &args);
3060 		mutex_unlock(&kvm->slots_lock);
3061 		if (!r) {
3062 			r = copy_to_user(argp, &args, sizeof(args));
3063 			if (r)
3064 				r = -EFAULT;
3065 		}
3066 		break;
3067 	}
3068 	case KVM_S390_SET_CMMA_BITS: {
3069 		struct kvm_s390_cmma_log args;
3070 
3071 		r = -EFAULT;
3072 		if (copy_from_user(&args, argp, sizeof(args)))
3073 			break;
3074 		mutex_lock(&kvm->slots_lock);
3075 		r = kvm_s390_set_cmma_bits(kvm, &args);
3076 		mutex_unlock(&kvm->slots_lock);
3077 		break;
3078 	}
3079 	case KVM_S390_PV_COMMAND: {
3080 		struct kvm_pv_cmd args;
3081 
3082 		/* protvirt means user cpu state */
3083 		kvm_s390_set_user_cpu_state_ctrl(kvm);
3084 		r = 0;
3085 		if (!is_prot_virt_host()) {
3086 			r = -EINVAL;
3087 			break;
3088 		}
3089 		if (copy_from_user(&args, argp, sizeof(args))) {
3090 			r = -EFAULT;
3091 			break;
3092 		}
3093 		if (args.flags) {
3094 			r = -EINVAL;
3095 			break;
3096 		}
3097 		/* must be called without kvm->lock */
3098 		r = kvm_s390_handle_pv(kvm, &args);
3099 		if (copy_to_user(argp, &args, sizeof(args))) {
3100 			r = -EFAULT;
3101 			break;
3102 		}
3103 		break;
3104 	}
3105 	case KVM_S390_MEM_OP: {
3106 		struct kvm_s390_mem_op mem_op;
3107 
3108 		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
3109 			r = kvm_s390_vm_mem_op(kvm, &mem_op);
3110 		else
3111 			r = -EFAULT;
3112 		break;
3113 	}
3114 	case KVM_S390_ZPCI_OP: {
3115 		struct kvm_s390_zpci_op args;
3116 
3117 		r = -EINVAL;
3118 		if (!IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
3119 			break;
3120 		if (copy_from_user(&args, argp, sizeof(args))) {
3121 			r = -EFAULT;
3122 			break;
3123 		}
3124 		r = kvm_s390_pci_zpci_op(kvm, &args);
3125 		break;
3126 	}
3127 	default:
3128 		r = -ENOTTY;
3129 	}
3130 
3131 	return r;
3132 }
3133 
3134 static int kvm_s390_apxa_installed(void)
3135 {
3136 	struct ap_config_info info;
3137 
3138 	if (ap_instructions_available()) {
3139 		if (ap_qci(&info) == 0)
3140 			return info.apxa;
3141 	}
3142 
3143 	return 0;
3144 }
3145 
3146 /*
3147  * The format of the crypto control block (CRYCB) is specified in the 3 low
3148  * order bits of the CRYCB designation (CRYCBD) field as follows:
3149  * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
3150  *	     AP extended addressing (APXA) facility are installed.
3151  * Format 1: The APXA facility is not installed but the MSAX3 facility is.
3152  * Format 2: Both the APXA and MSAX3 facilities are installed.
3153  */
3154 static void kvm_s390_set_crycb_format(struct kvm *kvm)
3155 {
3156 	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
3157 
3158 	/* Clear the CRYCB format bits - i.e., set format 0 by default */
3159 	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
3160 
3161 	/* Check whether MSAX3 is installed */
3162 	if (!test_kvm_facility(kvm, 76))
3163 		return;
3164 
3165 	if (kvm_s390_apxa_installed())
3166 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
3167 	else
3168 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
3169 }
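/*
 * The resulting CRYCBD is thus the address of the CRYCB with the format
 * encoded in its low order bits, e.g. (crycb | CRYCB_FORMAT2) when both
 * MSAX3 and APXA are available.
 */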
3170 
3171 /*
3172  * kvm_arch_crypto_set_masks
3173  *
3174  * @kvm: pointer to the target guest's KVM struct containing the crypto masks
3175  *	 to be set.
3176  * @apm: the mask identifying the accessible AP adapters
3177  * @aqm: the mask identifying the accessible AP domains
3178  * @adm: the mask identifying the accessible AP control domains
3179  *
3180  * Set the masks that identify the adapters, domains and control domains to
3181  * which the KVM guest is granted access.
3182  *
3183  * Note: The kvm->lock mutex must be locked by the caller before invoking this
3184  *	 function.
3185  */
3186 void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
3187 			       unsigned long *aqm, unsigned long *adm)
3188 {
3189 	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
3190 
3191 	kvm_s390_vcpu_block_all(kvm);
3192 
3193 	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
3194 	case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
3195 		memcpy(crycb->apcb1.apm, apm, 32);
3196 		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
3197 			 apm[0], apm[1], apm[2], apm[3]);
3198 		memcpy(crycb->apcb1.aqm, aqm, 32);
3199 		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
3200 			 aqm[0], aqm[1], aqm[2], aqm[3]);
3201 		memcpy(crycb->apcb1.adm, adm, 32);
3202 		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
3203 			 adm[0], adm[1], adm[2], adm[3]);
3204 		break;
3205 	case CRYCB_FORMAT1:
3206 	case CRYCB_FORMAT0: /* fall through - both use APCB0 */
3207 		memcpy(crycb->apcb0.apm, apm, 8);
3208 		memcpy(crycb->apcb0.aqm, aqm, 2);
3209 		memcpy(crycb->apcb0.adm, adm, 2);
3210 		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
3211 			 apm[0], *((unsigned short *)aqm),
3212 			 *((unsigned short *)adm));
3213 		break;
3214 	default:	/* Cannot happen */
3215 		break;
3216 	}
3217 
3218 	/* recreate the shadow crycb for each vcpu */
3219 	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
3220 	kvm_s390_vcpu_unblock_all(kvm);
3221 }
3222 EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
3223 
3224 /*
3225  * kvm_arch_crypto_clear_masks
3226  *
3227  * @kvm: pointer to the target guest's KVM struct containing the crypto masks
3228  *	 to be cleared.
3229  *
3230  * Clear the masks that identify the adapters, domains and control domains to
3231  * which the KVM guest is granted access.
3232  *
3233  * Note: The kvm->lock mutex must be locked by the caller before invoking this
3234  *	 function.
3235  */
3236 void kvm_arch_crypto_clear_masks(struct kvm *kvm)
3237 {
3238 	kvm_s390_vcpu_block_all(kvm);
3239 
3240 	memset(&kvm->arch.crypto.crycb->apcb0, 0,
3241 	       sizeof(kvm->arch.crypto.crycb->apcb0));
3242 	memset(&kvm->arch.crypto.crycb->apcb1, 0,
3243 	       sizeof(kvm->arch.crypto.crycb->apcb1));
3244 
3245 	VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
3246 	/* recreate the shadow crycb for each vcpu */
3247 	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
3248 	kvm_s390_vcpu_unblock_all(kvm);
3249 }
3250 EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
3251 
3252 static u64 kvm_s390_get_initial_cpuid(void)
3253 {
3254 	struct cpuid cpuid;
3255 
3256 	get_cpu_id(&cpuid);
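	/* a version of 0xff marks the cpuid as belonging to a KVM guest */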
3257 	cpuid.version = 0xff;
3258 	return *((u64 *) &cpuid);
3259 }
3260 
3261 static void kvm_s390_crypto_init(struct kvm *kvm)
3262 {
3263 	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
3264 	kvm_s390_set_crycb_format(kvm);
3265 	init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem);
3266 
3267 	if (!test_kvm_facility(kvm, 76))
3268 		return;
3269 
3270 	/* Enable AES/DEA protected key functions by default */
3271 	kvm->arch.crypto.aes_kw = 1;
3272 	kvm->arch.crypto.dea_kw = 1;
3273 	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
3274 			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
3275 	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
3276 			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
3277 }
3278 
3279 static void sca_dispose(struct kvm *kvm)
3280 {
3281 	if (kvm->arch.use_esca)
3282 		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
3283 	else
3284 		free_page((unsigned long)(kvm->arch.sca));
3285 	kvm->arch.sca = NULL;
3286 }
3287 
3288 void kvm_arch_free_vm(struct kvm *kvm)
3289 {
3290 	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
3291 		kvm_s390_pci_clear_list(kvm);
3292 
3293 	__kvm_arch_free_vm(kvm);
3294 }
3295 
3296 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
3297 {
3298 	gfp_t alloc_flags = GFP_KERNEL_ACCOUNT;
3299 	int i, rc;
3300 	char debug_name[16];
3301 	static unsigned long sca_offset;
3302 
3303 	rc = -EINVAL;
3304 #ifdef CONFIG_KVM_S390_UCONTROL
3305 	if (type & ~KVM_VM_S390_UCONTROL)
3306 		goto out_err;
3307 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
3308 		goto out_err;
3309 #else
3310 	if (type)
3311 		goto out_err;
3312 #endif
3313 
3314 	rc = s390_enable_sie();
3315 	if (rc)
3316 		goto out_err;
3317 
3318 	rc = -ENOMEM;
3319 
3320 	if (!sclp.has_64bscao)
3321 		alloc_flags |= GFP_DMA;
3322 	rwlock_init(&kvm->arch.sca_lock);
3323 	/* start with basic SCA */
3324 	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
3325 	if (!kvm->arch.sca)
3326 		goto out_err;
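	/*
	 * A basic SCA is smaller than a page; stagger each VM's SCA
	 * origin by 16 bytes so the blocks of different VMs do not all
	 * start at the same page offset (and thus the same cache lines).
	 */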
3327 	mutex_lock(&kvm_lock);
3328 	sca_offset += 16;
3329 	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
3330 		sca_offset = 0;
3331 	kvm->arch.sca = (struct bsca_block *)
3332 			((char *) kvm->arch.sca + sca_offset);
3333 	mutex_unlock(&kvm_lock);
3334 
3335 	sprintf(debug_name, "kvm-%u", current->pid);
3336 
3337 	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
3338 	if (!kvm->arch.dbf)
3339 		goto out_err;
3340 
3341 	BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
3342 	kvm->arch.sie_page2 =
3343 	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
3344 	if (!kvm->arch.sie_page2)
3345 		goto out_err;
3346 
3347 	kvm->arch.sie_page2->kvm = kvm;
3348 	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
3349 
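	/*
	 * fac_mask bounds what may ever be enabled for the guest (host
	 * facilities intersected with those KVM can virtualize), while
	 * fac_list is the set actually visible to the guest by default.
	 */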
3350 	for (i = 0; i < kvm_s390_fac_size(); i++) {
3351 		kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
3352 					      (kvm_s390_fac_base[i] |
3353 					       kvm_s390_fac_ext[i]);
3354 		kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
3355 					      kvm_s390_fac_base[i];
3356 	}
3357 	kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
3358 
3359 	/* we are always in czam mode - even on pre z14 machines */
3360 	set_kvm_facility(kvm->arch.model.fac_mask, 138);
3361 	set_kvm_facility(kvm->arch.model.fac_list, 138);
3362 	/* we emulate STHYI in kvm */
3363 	set_kvm_facility(kvm->arch.model.fac_mask, 74);
3364 	set_kvm_facility(kvm->arch.model.fac_list, 74);
3365 	if (MACHINE_HAS_TLB_GUEST) {
3366 		set_kvm_facility(kvm->arch.model.fac_mask, 147);
3367 		set_kvm_facility(kvm->arch.model.fac_list, 147);
3368 	}
3369 
3370 	if (css_general_characteristics.aiv && test_facility(65))
3371 		set_kvm_facility(kvm->arch.model.fac_mask, 65);
3372 
3373 	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
3374 	kvm->arch.model.ibc = sclp.ibc & 0x0fff;
3375 
3376 	kvm->arch.model.uv_feat_guest.feat = 0;
3377 
3378 	kvm_s390_crypto_init(kvm);
3379 
3380 	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
3381 		mutex_lock(&kvm->lock);
3382 		kvm_s390_pci_init_list(kvm);
3383 		kvm_s390_vcpu_pci_enable_interp(kvm);
3384 		mutex_unlock(&kvm->lock);
3385 	}
3386 
3387 	mutex_init(&kvm->arch.float_int.ais_lock);
3388 	spin_lock_init(&kvm->arch.float_int.lock);
3389 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
3390 		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
3391 	init_waitqueue_head(&kvm->arch.ipte_wq);
3392 	mutex_init(&kvm->arch.ipte_mutex);
3393 
3394 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
3395 	VM_EVENT(kvm, 3, "vm created with type %lu", type);
3396 
3397 	if (type & KVM_VM_S390_UCONTROL) {
3398 		kvm->arch.gmap = NULL;
3399 		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
3400 	} else {
3401 		if (sclp.hamax == U64_MAX)
3402 			kvm->arch.mem_limit = TASK_SIZE_MAX;
3403 		else
3404 			kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
3405 						    sclp.hamax + 1);
3406 		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
3407 		if (!kvm->arch.gmap)
3408 			goto out_err;
3409 		kvm->arch.gmap->private = kvm;
3410 		kvm->arch.gmap->pfault_enabled = 0;
3411 	}
3412 
3413 	kvm->arch.use_pfmfi = sclp.has_pfmfi;
3414 	kvm->arch.use_skf = sclp.has_skey;
3415 	spin_lock_init(&kvm->arch.start_stop_lock);
3416 	kvm_s390_vsie_init(kvm);
3417 	if (use_gisa)
3418 		kvm_s390_gisa_init(kvm);
3419 	INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup);
3420 	kvm->arch.pv.set_aside = NULL;
3421 	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
3422 
3423 	return 0;
3424 out_err:
3425 	free_page((unsigned long)kvm->arch.sie_page2);
3426 	debug_unregister(kvm->arch.dbf);
3427 	sca_dispose(kvm);
3428 	KVM_EVENT(3, "creation of vm failed: %d", rc);
3429 	return rc;
3430 }
3431 
3432 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
3433 {
3434 	u16 rc, rrc;
3435 
3436 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
3437 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
3438 	kvm_s390_clear_local_irqs(vcpu);
3439 	kvm_clear_async_pf_completion_queue(vcpu);
3440 	if (!kvm_is_ucontrol(vcpu->kvm))
3441 		sca_del_vcpu(vcpu);
3442 	kvm_s390_update_topology_change_report(vcpu->kvm, 1);
3443 
3444 	if (kvm_is_ucontrol(vcpu->kvm))
3445 		gmap_remove(vcpu->arch.gmap);
3446 
3447 	if (vcpu->kvm->arch.use_cmma)
3448 		kvm_s390_vcpu_unsetup_cmma(vcpu);
3449 	/* We cannot hold the vcpu mutex here, we are already dying */
3450 	if (kvm_s390_pv_cpu_get_handle(vcpu))
3451 		kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
3452 	free_page((unsigned long)(vcpu->arch.sie_block));
3453 }
3454 
3455 void kvm_arch_destroy_vm(struct kvm *kvm)
3456 {
3457 	u16 rc, rrc;
3458 
3459 	kvm_destroy_vcpus(kvm);
3460 	sca_dispose(kvm);
3461 	kvm_s390_gisa_destroy(kvm);
3462 	/*
3463 	 * We are already at the end of life and kvm->lock is not taken.
3464 	 * This is ok as the file descriptor is closed by now and nobody
3465 	 * can mess with the pv state.
3466 	 */
3467 	kvm_s390_pv_deinit_cleanup_all(kvm, &rc, &rrc);
3468 	/*
3469 	 * Remove the mmu notifier only when the whole KVM VM is torn down,
3470 	 * and only if one was registered to begin with. If the VM is
3471 	 * currently not protected, but has been previously been protected,
3472 	 * then it's possible that the notifier is still registered.
3473 	 */
3474 	if (kvm->arch.pv.mmu_notifier.ops)
3475 		mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm);
3476 
3477 	debug_unregister(kvm->arch.dbf);
3478 	free_page((unsigned long)kvm->arch.sie_page2);
3479 	if (!kvm_is_ucontrol(kvm))
3480 		gmap_remove(kvm->arch.gmap);
3481 	kvm_s390_destroy_adapters(kvm);
3482 	kvm_s390_clear_float_irqs(kvm);
3483 	kvm_s390_vsie_destroy(kvm);
3484 	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
3485 }
3486 
3487 /* Section: vcpu related */
3488 static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
3489 {
3490 	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
3491 	if (!vcpu->arch.gmap)
3492 		return -ENOMEM;
3493 	vcpu->arch.gmap->private = vcpu->kvm;
3494 
3495 	return 0;
3496 }
3497 
3498 static void sca_del_vcpu(struct kvm_vcpu *vcpu)
3499 {
3500 	if (!kvm_s390_use_sca_entries())
3501 		return;
3502 	read_lock(&vcpu->kvm->arch.sca_lock);
3503 	if (vcpu->kvm->arch.use_esca) {
3504 		struct esca_block *sca = vcpu->kvm->arch.sca;
3505 
3506 		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
3507 		sca->cpu[vcpu->vcpu_id].sda = 0;
3508 	} else {
3509 		struct bsca_block *sca = vcpu->kvm->arch.sca;
3510 
3511 		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
3512 		sca->cpu[vcpu->vcpu_id].sda = 0;
3513 	}
3514 	read_unlock(&vcpu->kvm->arch.sca_lock);
3515 }
3516 
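/*
 * Publish this vcpu's SIE block in the SCA and point the SIE block at the
 * SCA origin, split into the scaoh/scaol halves of its physical address.
 */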
3517 static void sca_add_vcpu(struct kvm_vcpu *vcpu)
3518 {
3519 	if (!kvm_s390_use_sca_entries()) {
3520 		phys_addr_t sca_phys = virt_to_phys(vcpu->kvm->arch.sca);
3521 
3522 		/* we still need the basic sca for the ipte control */
3523 		vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3524 		vcpu->arch.sie_block->scaol = sca_phys;
3525 		return;
3526 	}
3527 	read_lock(&vcpu->kvm->arch.sca_lock);
3528 	if (vcpu->kvm->arch.use_esca) {
3529 		struct esca_block *sca = vcpu->kvm->arch.sca;
3530 		phys_addr_t sca_phys = virt_to_phys(sca);
3531 
3532 		sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
3533 		vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3534 		vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK;
3535 		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
3536 		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
3537 	} else {
3538 		struct bsca_block *sca = vcpu->kvm->arch.sca;
3539 		phys_addr_t sca_phys = virt_to_phys(sca);
3540 
3541 		sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
3542 		vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3543 		vcpu->arch.sie_block->scaol = sca_phys;
3544 		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
3545 	}
3546 	read_unlock(&vcpu->kvm->arch.sca_lock);
3547 }
3548 
3549 /* Basic SCA to Extended SCA data copy routines */
3550 static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
3551 {
3552 	d->sda = s->sda;
3553 	d->sigp_ctrl.c = s->sigp_ctrl.c;
3554 	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
3555 }
3556 
3557 static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
3558 {
3559 	int i;
3560 
3561 	d->ipte_control = s->ipte_control;
3562 	d->mcn[0] = s->mcn;
3563 	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
3564 		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
3565 }
3566 
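/*
 * Migrate a VM from the basic to the extended SCA: allocate a zeroed ESCA,
 * copy all entries while every vcpu is blocked and sca_lock is held for
 * writing, then repoint each SIE block at the new origin and mark it as
 * extended via ECB2_ESCA.
 */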
3567 static int sca_switch_to_extended(struct kvm *kvm)
3568 {
3569 	struct bsca_block *old_sca = kvm->arch.sca;
3570 	struct esca_block *new_sca;
3571 	struct kvm_vcpu *vcpu;
3572 	unsigned long vcpu_idx;
3573 	u32 scaol, scaoh;
3574 	phys_addr_t new_sca_phys;
3575 
3576 	if (kvm->arch.use_esca)
3577 		return 0;
3578 
3579 	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL_ACCOUNT | __GFP_ZERO);
3580 	if (!new_sca)
3581 		return -ENOMEM;
3582 
3583 	new_sca_phys = virt_to_phys(new_sca);
3584 	scaoh = new_sca_phys >> 32;
3585 	scaol = new_sca_phys & ESCA_SCAOL_MASK;
3586 
3587 	kvm_s390_vcpu_block_all(kvm);
3588 	write_lock(&kvm->arch.sca_lock);
3589 
3590 	sca_copy_b_to_e(new_sca, old_sca);
3591 
3592 	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
3593 		vcpu->arch.sie_block->scaoh = scaoh;
3594 		vcpu->arch.sie_block->scaol = scaol;
3595 		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
3596 	}
3597 	kvm->arch.sca = new_sca;
3598 	kvm->arch.use_esca = 1;
3599 
3600 	write_unlock(&kvm->arch.sca_lock);
3601 	kvm_s390_vcpu_unblock_all(kvm);
3602 
3603 	free_page((unsigned long)old_sca);
3604 
3605 	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
3606 		 old_sca, kvm->arch.sca);
3607 	return 0;
3608 }
3609 
3610 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
3611 {
3612 	int rc;
3613 
3614 	if (!kvm_s390_use_sca_entries()) {
3615 		if (id < KVM_MAX_VCPUS)
3616 			return true;
3617 		return false;
3618 	}
3619 	if (id < KVM_S390_BSCA_CPU_SLOTS)
3620 		return true;
3621 	if (!sclp.has_esca || !sclp.has_64bscao)
3622 		return false;
3623 
3624 	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
3625 
3626 	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
3627 }
3628 
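/*
 * CPU timer bookkeeping: sie_block->cputm is only brought up to date
 * lazily. While accounting runs, cputm_start records the TOD value at the
 * last (re)start, so at any instant the guest's remaining time is roughly
 *
 *	cputm = sie_block->cputm - (get_tod_clock_fast() - cputm_start)
 *
 * cputm_seqcount makes this pair readable from other threads while the
 * owning vcpu thread (the only writer) updates it.
 */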
3629 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3630 static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3631 {
3632 	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
3633 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3634 	vcpu->arch.cputm_start = get_tod_clock_fast();
3635 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3636 }
3637 
3638 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3639 static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3640 {
3641 	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
3642 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3643 	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3644 	vcpu->arch.cputm_start = 0;
3645 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3646 }
3647 
3648 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3649 static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3650 {
3651 	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
3652 	vcpu->arch.cputm_enabled = true;
3653 	__start_cpu_timer_accounting(vcpu);
3654 }
3655 
3656 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3657 static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3658 {
3659 	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
3660 	__stop_cpu_timer_accounting(vcpu);
3661 	vcpu->arch.cputm_enabled = false;
3662 }
3663 
3664 static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3665 {
3666 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3667 	__enable_cpu_timer_accounting(vcpu);
3668 	preempt_enable();
3669 }
3670 
3671 static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3672 {
3673 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3674 	__disable_cpu_timer_accounting(vcpu);
3675 	preempt_enable();
3676 }
3677 
3678 /* set the cpu timer - may only be called from the VCPU thread itself */
3679 void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
3680 {
3681 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3682 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3683 	if (vcpu->arch.cputm_enabled)
3684 		vcpu->arch.cputm_start = get_tod_clock_fast();
3685 	vcpu->arch.sie_block->cputm = cputm;
3686 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3687 	preempt_enable();
3688 }
3689 
3690 /* update and get the cpu timer - can also be called from other VCPU threads */
3691 __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
3692 {
3693 	unsigned int seq;
3694 	__u64 value;
3695 
3696 	if (unlikely(!vcpu->arch.cputm_enabled))
3697 		return vcpu->arch.sie_block->cputm;
3698 
3699 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3700 	do {
3701 		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
3702 		/*
3703 		 * If the writer would ever execute a read in the critical
3704 		 * section, e.g. in irq context, we have a deadlock.
3705 		 */
3706 		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
3707 		value = vcpu->arch.sie_block->cputm;
3708 		/* if cputm_start is 0, accounting is being started/stopped */
3709 		if (likely(vcpu->arch.cputm_start))
3710 			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3711 	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
3712 	preempt_enable();
3713 	return value;
3714 }
3715 
3716 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3717 {
3718 
3719 	gmap_enable(vcpu->arch.enabled_gmap);
3720 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
3721 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3722 		__start_cpu_timer_accounting(vcpu);
3723 	vcpu->cpu = cpu;
3724 }
3725 
3726 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
3727 {
3728 	vcpu->cpu = -1;
3729 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3730 		__stop_cpu_timer_accounting(vcpu);
3731 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
3732 	vcpu->arch.enabled_gmap = gmap_get_enabled();
3733 	gmap_disable(vcpu->arch.enabled_gmap);
3734 
3735 }
3736 
3737 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
3738 {
3739 	mutex_lock(&vcpu->kvm->lock);
3740 	preempt_disable();
3741 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
3742 	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
3743 	preempt_enable();
3744 	mutex_unlock(&vcpu->kvm->lock);
3745 	if (!kvm_is_ucontrol(vcpu->kvm)) {
3746 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
3747 		sca_add_vcpu(vcpu);
3748 	}
3749 	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
3750 		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3751 	/* make vcpu_load load the right gmap on the first trigger */
3752 	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
3753 }
3754 
3755 static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
3756 {
3757 	if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
3758 	    test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
3759 		return true;
3760 	return false;
3761 }
3762 
3763 static bool kvm_has_pckmo_ecc(struct kvm *kvm)
3764 {
3765 	/* At least one ECC subfunction must be present */
3766 	return kvm_has_pckmo_subfunc(kvm, 32) ||
3767 	       kvm_has_pckmo_subfunc(kvm, 33) ||
3768 	       kvm_has_pckmo_subfunc(kvm, 34) ||
3769 	       kvm_has_pckmo_subfunc(kvm, 40) ||
3770 	       kvm_has_pckmo_subfunc(kvm, 41);
3771 
3772 }
3773 
3774 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
3775 {
3776 	/*
3777 	 * If the AP instructions are not being interpreted and the MSAX3
3778 	 * facility is not configured for the guest, there is nothing to set up.
3779 	 */
3780 	if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
3781 		return;
3782 
3783 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
3784 	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
3785 	vcpu->arch.sie_block->eca &= ~ECA_APIE;
3786 	vcpu->arch.sie_block->ecd &= ~ECD_ECC;
3787 
3788 	if (vcpu->kvm->arch.crypto.apie)
3789 		vcpu->arch.sie_block->eca |= ECA_APIE;
3790 
3791 	/* Set up protected key support */
3792 	if (vcpu->kvm->arch.crypto.aes_kw) {
3793 		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
3794 		/* ecc is also wrapped with AES key */
3795 		if (kvm_has_pckmo_ecc(vcpu->kvm))
3796 			vcpu->arch.sie_block->ecd |= ECD_ECC;
3797 	}
3798 
3799 	if (vcpu->kvm->arch.crypto.dea_kw)
3800 		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
3801 }
3802 
3803 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
3804 {
3805 	free_page((unsigned long)phys_to_virt(vcpu->arch.sie_block->cbrlo));
3806 	vcpu->arch.sie_block->cbrlo = 0;
3807 }
3808 
3809 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
3810 {
3811 	void *cbrlo_page = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3812 
3813 	if (!cbrlo_page)
3814 		return -ENOMEM;
3815 
3816 	vcpu->arch.sie_block->cbrlo = virt_to_phys(cbrlo_page);
3817 	return 0;
3818 }
3819 
3820 static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
3821 {
3822 	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
3823 
3824 	vcpu->arch.sie_block->ibc = model->ibc;
3825 	if (test_kvm_facility(vcpu->kvm, 7))
3826 		vcpu->arch.sie_block->fac = virt_to_phys(model->fac_list);
3827 }
3828 
3829 static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
3830 {
3831 	int rc = 0;
3832 	u16 uvrc, uvrrc;
3833 
3834 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
3835 						    CPUSTAT_SM |
3836 						    CPUSTAT_STOPPED);
3837 
3838 	if (test_kvm_facility(vcpu->kvm, 78))
3839 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
3840 	else if (test_kvm_facility(vcpu->kvm, 8))
3841 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
3842 
3843 	kvm_s390_vcpu_setup_model(vcpu);
3844 
3845 	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
3846 	if (MACHINE_HAS_ESOP)
3847 		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
3848 	if (test_kvm_facility(vcpu->kvm, 9))
3849 		vcpu->arch.sie_block->ecb |= ECB_SRSI;
3850 	if (test_kvm_facility(vcpu->kvm, 11))
3851 		vcpu->arch.sie_block->ecb |= ECB_PTF;
3852 	if (test_kvm_facility(vcpu->kvm, 73))
3853 		vcpu->arch.sie_block->ecb |= ECB_TE;
3854 	if (!kvm_is_ucontrol(vcpu->kvm))
3855 		vcpu->arch.sie_block->ecb |= ECB_SPECI;
3856 
3857 	if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
3858 		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
3859 	if (test_kvm_facility(vcpu->kvm, 130))
3860 		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
3861 	vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
3862 	if (sclp.has_cei)
3863 		vcpu->arch.sie_block->eca |= ECA_CEI;
3864 	if (sclp.has_ib)
3865 		vcpu->arch.sie_block->eca |= ECA_IB;
3866 	if (sclp.has_siif)
3867 		vcpu->arch.sie_block->eca |= ECA_SII;
3868 	if (sclp.has_sigpif)
3869 		vcpu->arch.sie_block->eca |= ECA_SIGPI;
3870 	if (test_kvm_facility(vcpu->kvm, 129)) {
3871 		vcpu->arch.sie_block->eca |= ECA_VX;
3872 		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3873 	}
3874 	if (test_kvm_facility(vcpu->kvm, 139))
3875 		vcpu->arch.sie_block->ecd |= ECD_MEF;
3876 	if (test_kvm_facility(vcpu->kvm, 156))
3877 		vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
3878 	if (vcpu->arch.sie_block->gd) {
3879 		vcpu->arch.sie_block->eca |= ECA_AIV;
3880 		VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3881 			   vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3882 	}
3883 	vcpu->arch.sie_block->sdnxo = virt_to_phys(&vcpu->run->s.regs.sdnx) | SDNXC;
3884 	vcpu->arch.sie_block->riccbd = virt_to_phys(&vcpu->run->s.regs.riccb);
3885 
3886 	if (sclp.has_kss)
3887 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
3888 	else
3889 		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
3890 
3891 	if (vcpu->kvm->arch.use_cmma) {
3892 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
3893 		if (rc)
3894 			return rc;
3895 	}
3896 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3897 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
3898 
3899 	vcpu->arch.sie_block->hpid = HPID_KVM;
3900 
3901 	kvm_s390_vcpu_crypto_setup(vcpu);
3902 
3903 	kvm_s390_vcpu_pci_setup(vcpu);
3904 
3905 	mutex_lock(&vcpu->kvm->lock);
3906 	if (kvm_s390_pv_is_protected(vcpu->kvm)) {
3907 		rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
3908 		if (rc)
3909 			kvm_s390_vcpu_unsetup_cmma(vcpu);
3910 	}
3911 	mutex_unlock(&vcpu->kvm->lock);
3912 
3913 	return rc;
3914 }
3915 
3916 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
3917 {
3918 	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
3919 		return -EINVAL;
3920 	return 0;
3921 }
3922 
3923 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
3924 {
3925 	struct sie_page *sie_page;
3926 	int rc;
3927 
3928 	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
3929 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT);
3930 	if (!sie_page)
3931 		return -ENOMEM;
3932 
3933 	vcpu->arch.sie_block = &sie_page->sie_block;
3934 	vcpu->arch.sie_block->itdba = virt_to_phys(&sie_page->itdb);
3935 
3936 	/* the real guest size will always be smaller than msl */
3937 	vcpu->arch.sie_block->mso = 0;
3938 	vcpu->arch.sie_block->msl = sclp.hamax;
3939 
3940 	vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
3941 	spin_lock_init(&vcpu->arch.local_int.lock);
3942 	vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm);
3943 	seqcount_init(&vcpu->arch.cputm_seqcount);
3944 
3945 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3946 	kvm_clear_async_pf_completion_queue(vcpu);
3947 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
3948 				    KVM_SYNC_GPRS |
3949 				    KVM_SYNC_ACRS |
3950 				    KVM_SYNC_CRS |
3951 				    KVM_SYNC_ARCH0 |
3952 				    KVM_SYNC_PFAULT |
3953 				    KVM_SYNC_DIAG318;
3954 	kvm_s390_set_prefix(vcpu, 0);
3955 	if (test_kvm_facility(vcpu->kvm, 64))
3956 		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
3957 	if (test_kvm_facility(vcpu->kvm, 82))
3958 		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
3959 	if (test_kvm_facility(vcpu->kvm, 133))
3960 		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
3961 	if (test_kvm_facility(vcpu->kvm, 156))
3962 		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
3963 	/* fprs can be synchronized via vrs, even if the guest has no vx. With
3964 	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
3965 	 */
3966 	if (MACHINE_HAS_VX)
3967 		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
3968 	else
3969 		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
3970 
3971 	if (kvm_is_ucontrol(vcpu->kvm)) {
3972 		rc = __kvm_ucontrol_vcpu_init(vcpu);
3973 		if (rc)
3974 			goto out_free_sie_block;
3975 	}
3976 
3977 	VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
3978 		 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3979 	trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3980 
3981 	rc = kvm_s390_vcpu_setup(vcpu);
3982 	if (rc)
3983 		goto out_ucontrol_uninit;
3984 
3985 	kvm_s390_update_topology_change_report(vcpu->kvm, 1);
3986 	return 0;
3987 
3988 out_ucontrol_uninit:
3989 	if (kvm_is_ucontrol(vcpu->kvm))
3990 		gmap_remove(vcpu->arch.gmap);
3991 out_free_sie_block:
3992 	free_page((unsigned long)(vcpu->arch.sie_block));
3993 	return rc;
3994 }
3995 
3996 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3997 {
3998 	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
3999 	return kvm_s390_vcpu_has_irq(vcpu, 0);
4000 }
4001 
4002 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
4003 {
4004 	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
4005 }
4006 
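/*
 * SIE blocking protocol: PROG_BLOCK_SIE and PROG_REQUEST in prog20 keep a
 * vcpu from (re)entering SIE; exit_sie() additionally kicks a running vcpu
 * out and busy-waits until it has actually left SIE.
 */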
4007 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
4008 {
4009 	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
4010 	exit_sie(vcpu);
4011 }
4012 
4013 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
4014 {
4015 	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
4016 }
4017 
4018 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
4019 {
4020 	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
4021 	exit_sie(vcpu);
4022 }
4023 
4024 bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
4025 {
4026 	return atomic_read(&vcpu->arch.sie_block->prog20) &
4027 	       (PROG_BLOCK_SIE | PROG_REQUEST);
4028 }
4029 
4030 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
4031 {
4032 	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
4033 }
4034 
4035 /*
4036  * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
4037  * If the CPU is not running (e.g. waiting while idle), the function
4038  * returns immediately. */
4039 void exit_sie(struct kvm_vcpu *vcpu)
4040 {
4041 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
4042 	kvm_s390_vsie_kick(vcpu);
4043 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
4044 		cpu_relax();
4045 }
4046 
4047 /* Kick a guest cpu out of SIE to process a request synchronously */
4048 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
4049 {
4050 	__kvm_make_request(req, vcpu);
4051 	kvm_s390_vcpu_request(vcpu);
4052 }
4053 
4054 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
4055 			      unsigned long end)
4056 {
4057 	struct kvm *kvm = gmap->private;
4058 	struct kvm_vcpu *vcpu;
4059 	unsigned long prefix;
4060 	unsigned long i;
4061 
4062 	if (gmap_is_shadow(gmap))
4063 		return;
4064 	if (start >= 1UL << 31)
4065 		/* We are only interested in prefix pages */
4066 		return;
4067 	kvm_for_each_vcpu(i, vcpu, kvm) {
4068 		/* match against both prefix pages */
4069 		prefix = kvm_s390_get_prefix(vcpu);
4070 		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
4071 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
4072 				   start, end);
4073 			kvm_s390_sync_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
4074 		}
4075 	}
4076 }
4077 
4078 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
4079 {
4080 	/* do not poll with more than halt_poll_max_steal percent of steal time */
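	/*
	 * avg_steal_timer is in CPU-timer units (4096, i.e. 1 << 12, per
	 * microsecond), so dividing by TICK_USEC << 12 yields steal time
	 * as a fraction of one tick; times 100 it becomes a percentage.
	 */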
4081 	if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
4082 	    READ_ONCE(halt_poll_max_steal)) {
4083 		vcpu->stat.halt_no_poll_steal++;
4084 		return true;
4085 	}
4086 	return false;
4087 }
4088 
4089 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
4090 {
4091 	/* kvm common code refers to this, but never calls it */
4092 	BUG();
4093 	return 0;
4094 }
4095 
4096 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
4097 					   struct kvm_one_reg *reg)
4098 {
4099 	int r = -EINVAL;
4100 
4101 	switch (reg->id) {
4102 	case KVM_REG_S390_TODPR:
4103 		r = put_user(vcpu->arch.sie_block->todpr,
4104 			     (u32 __user *)reg->addr);
4105 		break;
4106 	case KVM_REG_S390_EPOCHDIFF:
4107 		r = put_user(vcpu->arch.sie_block->epoch,
4108 			     (u64 __user *)reg->addr);
4109 		break;
4110 	case KVM_REG_S390_CPU_TIMER:
4111 		r = put_user(kvm_s390_get_cpu_timer(vcpu),
4112 			     (u64 __user *)reg->addr);
4113 		break;
4114 	case KVM_REG_S390_CLOCK_COMP:
4115 		r = put_user(vcpu->arch.sie_block->ckc,
4116 			     (u64 __user *)reg->addr);
4117 		break;
4118 	case KVM_REG_S390_PFTOKEN:
4119 		r = put_user(vcpu->arch.pfault_token,
4120 			     (u64 __user *)reg->addr);
4121 		break;
4122 	case KVM_REG_S390_PFCOMPARE:
4123 		r = put_user(vcpu->arch.pfault_compare,
4124 			     (u64 __user *)reg->addr);
4125 		break;
4126 	case KVM_REG_S390_PFSELECT:
4127 		r = put_user(vcpu->arch.pfault_select,
4128 			     (u64 __user *)reg->addr);
4129 		break;
4130 	case KVM_REG_S390_PP:
4131 		r = put_user(vcpu->arch.sie_block->pp,
4132 			     (u64 __user *)reg->addr);
4133 		break;
4134 	case KVM_REG_S390_GBEA:
4135 		r = put_user(vcpu->arch.sie_block->gbea,
4136 			     (u64 __user *)reg->addr);
4137 		break;
4138 	default:
4139 		break;
4140 	}
4141 
4142 	return r;
4143 }
4144 
4145 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
4146 					   struct kvm_one_reg *reg)
4147 {
4148 	int r = -EINVAL;
4149 	__u64 val;
4150 
4151 	switch (reg->id) {
4152 	case KVM_REG_S390_TODPR:
4153 		r = get_user(vcpu->arch.sie_block->todpr,
4154 			     (u32 __user *)reg->addr);
4155 		break;
4156 	case KVM_REG_S390_EPOCHDIFF:
4157 		r = get_user(vcpu->arch.sie_block->epoch,
4158 			     (u64 __user *)reg->addr);
4159 		break;
4160 	case KVM_REG_S390_CPU_TIMER:
4161 		r = get_user(val, (u64 __user *)reg->addr);
4162 		if (!r)
4163 			kvm_s390_set_cpu_timer(vcpu, val);
4164 		break;
4165 	case KVM_REG_S390_CLOCK_COMP:
4166 		r = get_user(vcpu->arch.sie_block->ckc,
4167 			     (u64 __user *)reg->addr);
4168 		break;
4169 	case KVM_REG_S390_PFTOKEN:
4170 		r = get_user(vcpu->arch.pfault_token,
4171 			     (u64 __user *)reg->addr);
4172 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4173 			kvm_clear_async_pf_completion_queue(vcpu);
4174 		break;
4175 	case KVM_REG_S390_PFCOMPARE:
4176 		r = get_user(vcpu->arch.pfault_compare,
4177 			     (u64 __user *)reg->addr);
4178 		break;
4179 	case KVM_REG_S390_PFSELECT:
4180 		r = get_user(vcpu->arch.pfault_select,
4181 			     (u64 __user *)reg->addr);
4182 		break;
4183 	case KVM_REG_S390_PP:
4184 		r = get_user(vcpu->arch.sie_block->pp,
4185 			     (u64 __user *)reg->addr);
4186 		break;
4187 	case KVM_REG_S390_GBEA:
4188 		r = get_user(vcpu->arch.sie_block->gbea,
4189 			     (u64 __user *)reg->addr);
4190 		break;
4191 	default:
4192 		break;
4193 	}
4194 
4195 	return r;
4196 }
4197 
4198 static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
4199 {
4200 	vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
4201 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
4202 	memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
4203 
4204 	kvm_clear_async_pf_completion_queue(vcpu);
4205 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
4206 		kvm_s390_vcpu_stop(vcpu);
4207 	kvm_s390_clear_local_irqs(vcpu);
4208 }
4209 
4210 static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
4211 {
4212 	/* Initial reset is a superset of the normal reset */
4213 	kvm_arch_vcpu_ioctl_normal_reset(vcpu);
4214 
4215 	/*
4216 	 * This equals the initial cpu reset in the POP, but we don't switch to ESA.
4217 	 * We do not only reset the internal data, but also ...
4218 	 */
4219 	vcpu->arch.sie_block->gpsw.mask = 0;
4220 	vcpu->arch.sie_block->gpsw.addr = 0;
4221 	kvm_s390_set_prefix(vcpu, 0);
4222 	kvm_s390_set_cpu_timer(vcpu, 0);
4223 	vcpu->arch.sie_block->ckc = 0;
4224 	memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
4225 	vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
4226 	vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
4227 
4228 	/* ... the data in sync regs */
4229 	memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
4230 	vcpu->run->s.regs.ckc = 0;
4231 	vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
4232 	vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
4233 	vcpu->run->psw_addr = 0;
4234 	vcpu->run->psw_mask = 0;
4235 	vcpu->run->s.regs.todpr = 0;
4236 	vcpu->run->s.regs.cputm = 0;
4237 	vcpu->run->s.regs.ckc = 0;
4238 	vcpu->run->s.regs.pp = 0;
4239 	vcpu->run->s.regs.gbea = 1;
4240 	vcpu->run->s.regs.fpc = 0;
4241 	/*
4242 	 * Do not reset these registers in the protected case, as some of
4243 	 * them are overlaid and they are not accessible in this case
4244 	 * anyway.
4245 	 */
4246 	if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
4247 		vcpu->arch.sie_block->gbea = 1;
4248 		vcpu->arch.sie_block->pp = 0;
4249 		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4250 		vcpu->arch.sie_block->todpr = 0;
4251 	}
4252 }
4253 
4254 static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
4255 {
4256 	struct kvm_sync_regs *regs = &vcpu->run->s.regs;
4257 
4258 	/* Clear reset is a superset of the initial reset */
4259 	kvm_arch_vcpu_ioctl_initial_reset(vcpu);
4260 
4261 	memset(&regs->gprs, 0, sizeof(regs->gprs));
4262 	memset(&regs->vrs, 0, sizeof(regs->vrs));
4263 	memset(&regs->acrs, 0, sizeof(regs->acrs));
4264 	memset(&regs->gscb, 0, sizeof(regs->gscb));
4265 
4266 	regs->etoken = 0;
4267 	regs->etoken_extension = 0;
4268 }
4269 
4270 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4271 {
4272 	vcpu_load(vcpu);
4273 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
4274 	vcpu_put(vcpu);
4275 	return 0;
4276 }
4277 
4278 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4279 {
4280 	vcpu_load(vcpu);
4281 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
4282 	vcpu_put(vcpu);
4283 	return 0;
4284 }
4285 
4286 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
4287 				  struct kvm_sregs *sregs)
4288 {
4289 	vcpu_load(vcpu);
4290 
4291 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
4292 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
4293 
4294 	vcpu_put(vcpu);
4295 	return 0;
4296 }
4297 
4298 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
4299 				  struct kvm_sregs *sregs)
4300 {
4301 	vcpu_load(vcpu);
4302 
4303 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
4304 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
4305 
4306 	vcpu_put(vcpu);
4307 	return 0;
4308 }
4309 
4310 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4311 {
4312 	int ret = 0;
4313 
4314 	vcpu_load(vcpu);
4315 
4316 	vcpu->run->s.regs.fpc = fpu->fpc;
4317 	if (MACHINE_HAS_VX)
4318 		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
4319 				 (freg_t *) fpu->fprs);
4320 	else
4321 		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
4322 
4323 	vcpu_put(vcpu);
4324 	return ret;
4325 }
4326 
4327 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4328 {
4329 	vcpu_load(vcpu);
4330 
4331 	/* make sure we have the latest values */
4332 	save_fpu_regs();
4333 	if (MACHINE_HAS_VX)
4334 		convert_vx_to_fp((freg_t *) fpu->fprs,
4335 				 (__vector128 *) vcpu->run->s.regs.vrs);
4336 	else
4337 		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
4338 	fpu->fpc = vcpu->run->s.regs.fpc;
4339 
4340 	vcpu_put(vcpu);
4341 	return 0;
4342 }
4343 
4344 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
4345 {
4346 	int rc = 0;
4347 
4348 	if (!is_vcpu_stopped(vcpu))
4349 		rc = -EBUSY;
4350 	else {
4351 		vcpu->run->psw_mask = psw.mask;
4352 		vcpu->run->psw_addr = psw.addr;
4353 	}
4354 	return rc;
4355 }
4356 
4357 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
4358 				  struct kvm_translation *tr)
4359 {
4360 	return -EINVAL; /* not implemented yet */
4361 }
4362 
4363 #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
4364 			      KVM_GUESTDBG_USE_HW_BP | \
4365 			      KVM_GUESTDBG_ENABLE)
4366 
4367 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
4368 					struct kvm_guest_debug *dbg)
4369 {
4370 	int rc = 0;
4371 
4372 	vcpu_load(vcpu);
4373 
4374 	vcpu->guest_debug = 0;
4375 	kvm_s390_clear_bp_data(vcpu);
4376 
4377 	if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
4378 		rc = -EINVAL;
4379 		goto out;
4380 	}
4381 	if (!sclp.has_gpere) {
4382 		rc = -EINVAL;
4383 		goto out;
4384 	}
4385 
4386 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
4387 		vcpu->guest_debug = dbg->control;
4388 		/* enforce guest PER */
4389 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
4390 
4391 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
4392 			rc = kvm_s390_import_bp_data(vcpu, dbg);
4393 	} else {
4394 		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
4395 		vcpu->arch.guestdbg.last_bp = 0;
4396 	}
4397 
4398 	if (rc) {
4399 		vcpu->guest_debug = 0;
4400 		kvm_s390_clear_bp_data(vcpu);
4401 		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
4402 	}
4403 
4404 out:
4405 	vcpu_put(vcpu);
4406 	return rc;
4407 }
4408 
4409 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
4410 				    struct kvm_mp_state *mp_state)
4411 {
4412 	int ret;
4413 
4414 	vcpu_load(vcpu);
4415 
4416 	/* CHECK_STOP and LOAD are not supported yet */
4417 	ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
4418 				      KVM_MP_STATE_OPERATING;
4419 
4420 	vcpu_put(vcpu);
4421 	return ret;
4422 }
4423 
4424 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
4425 				    struct kvm_mp_state *mp_state)
4426 {
4427 	int rc = 0;
4428 
4429 	vcpu_load(vcpu);
4430 
4431 	/* user space knows about this interface - let it control the state */
4432 	kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm);
4433 
4434 	switch (mp_state->mp_state) {
4435 	case KVM_MP_STATE_STOPPED:
4436 		rc = kvm_s390_vcpu_stop(vcpu);
4437 		break;
4438 	case KVM_MP_STATE_OPERATING:
4439 		rc = kvm_s390_vcpu_start(vcpu);
4440 		break;
4441 	case KVM_MP_STATE_LOAD:
4442 		if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
4443 			rc = -ENXIO;
4444 			break;
4445 		}
4446 		rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
4447 		break;
4448 	case KVM_MP_STATE_CHECK_STOP:
4449 		fallthrough;	/* CHECK_STOP and LOAD are not supported yet */
4450 	default:
4451 		rc = -ENXIO;
4452 	}
4453 
4454 	vcpu_put(vcpu);
4455 	return rc;
4456 }
4457 
4458 static bool ibs_enabled(struct kvm_vcpu *vcpu)
4459 {
4460 	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
4461 }
4462 
4463 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
4464 {
4465 retry:
4466 	kvm_s390_vcpu_request_handled(vcpu);
4467 	if (!kvm_request_pending(vcpu))
4468 		return 0;
4469 	/*
4470 	 * If the guest prefix changed, re-arm the ipte notifier for the
4471 	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
4472 	 * This ensures that the ipte instruction for this request has
4473 	 * already finished. We might race against a second unmapper that
4474 	 * wants to set the blocking bit. Let's just retry the request loop.
4475 	 */
4476 	if (kvm_check_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu)) {
4477 		int rc;
4478 		rc = gmap_mprotect_notify(vcpu->arch.gmap,
4479 					  kvm_s390_get_prefix(vcpu),
4480 					  PAGE_SIZE * 2, PROT_WRITE);
4481 		if (rc) {
4482 			kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
4483 			return rc;
4484 		}
4485 		goto retry;
4486 	}
4487 
4488 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
4489 		vcpu->arch.sie_block->ihcpu = 0xffff;
4490 		goto retry;
4491 	}
4492 
4493 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
4494 		if (!ibs_enabled(vcpu)) {
4495 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
4496 			kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
4497 		}
4498 		goto retry;
4499 	}
4500 
4501 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
4502 		if (ibs_enabled(vcpu)) {
4503 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
4504 			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
4505 		}
4506 		goto retry;
4507 	}
4508 
4509 	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
4510 		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
4511 		goto retry;
4512 	}
4513 
4514 	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
4515 		/*
4516 		 * Disable CMM virtualization; we will emulate the ESSA
4517 		 * instruction manually, in order to provide additional
4518 		 * functionalities needed for live migration.
4519 		 */
4520 		vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
4521 		goto retry;
4522 	}
4523 
4524 	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
4525 		/*
4526 		 * Re-enable CMM virtualization if CMMA is available and
4527 		 * CMM has been used.
4528 		 */
4529 		if ((vcpu->kvm->arch.use_cmma) &&
4530 		    (vcpu->kvm->mm->context.uses_cmm))
4531 			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
4532 		goto retry;
4533 	}
4534 
4535 	/* we left the vsie handler, nothing to do, just clear the request */
4536 	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
4537 
4538 	return 0;
4539 }
4540 
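/*
 * Set the guest TOD by recording its offset from the host clock:
 * epoch = gtod->tod - host tod. With the multiple-epoch facility (139),
 * epdx holds the epoch-index difference and absorbs the borrow when the
 * 64-bit subtraction wraps.
 */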
4541 static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4542 {
4543 	struct kvm_vcpu *vcpu;
4544 	union tod_clock clk;
4545 	unsigned long i;
4546 
4547 	preempt_disable();
4548 
4549 	store_tod_clock_ext(&clk);
4550 
4551 	kvm->arch.epoch = gtod->tod - clk.tod;
4552 	kvm->arch.epdx = 0;
4553 	if (test_kvm_facility(kvm, 139)) {
4554 		kvm->arch.epdx = gtod->epoch_idx - clk.ei;
4555 		if (kvm->arch.epoch > gtod->tod)
4556 			kvm->arch.epdx -= 1;
4557 	}
4558 
4559 	kvm_s390_vcpu_block_all(kvm);
4560 	kvm_for_each_vcpu(i, vcpu, kvm) {
4561 		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
4562 		vcpu->arch.sie_block->epdx  = kvm->arch.epdx;
4563 	}
4564 
4565 	kvm_s390_vcpu_unblock_all(kvm);
4566 	preempt_enable();
4567 }
4568 
4569 int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4570 {
4571 	if (!mutex_trylock(&kvm->lock))
4572 		return 0;
4573 	__kvm_s390_set_tod_clock(kvm, gtod);
4574 	mutex_unlock(&kvm->lock);
4575 	return 1;
4576 }
4577 
4578 /**
4579  * kvm_arch_fault_in_page - fault-in guest page if necessary
4580  * @vcpu: The corresponding virtual cpu
4581  * @gpa: Guest physical address
4582  * @writable: Whether the page should be writable or not
4583  *
4584  * Make sure that a guest page has been faulted-in on the host.
4585  *
4586  * Return: Zero on success, negative error code otherwise.
4587  */
4588 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
4589 {
4590 	return gmap_fault(vcpu->arch.gmap, gpa,
4591 			  writable ? FAULT_FLAG_WRITE : 0);
4592 }
4593 
4594 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
4595 				      unsigned long token)
4596 {
4597 	struct kvm_s390_interrupt inti;
4598 	struct kvm_s390_irq irq;
4599 
4600 	if (start_token) {
4601 		irq.u.ext.ext_params2 = token;
4602 		irq.type = KVM_S390_INT_PFAULT_INIT;
4603 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
4604 	} else {
4605 		inti.type = KVM_S390_INT_PFAULT_DONE;
4606 		inti.parm64 = token;
4607 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
4608 	}
4609 }
4610 
4611 bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
4612 				     struct kvm_async_pf *work)
4613 {
4614 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
4615 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
4616 
4617 	return true;
4618 }
4619 
4620 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
4621 				 struct kvm_async_pf *work)
4622 {
4623 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
4624 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
4625 }
4626 
4627 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
4628 			       struct kvm_async_pf *work)
4629 {
4630 	/* s390 will always inject the page directly */
4631 }
4632 
4633 bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
4634 {
4635 	/*
4636 	 * s390 will always inject the page directly,
4637 	 * but we still want check_async_completion to clean up
4638 	 */
4639 	return true;
4640 }
4641 
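/*
 * Decide whether a host fault may be handled asynchronously. The guest
 * must have the pfault handshake established (a valid token and a PSW
 * mask matching pfault_select/pfault_compare) and must currently be able
 * to take the completion interrupt; otherwise the caller falls back to
 * synchronous fault-in.
 */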
4642 static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
4643 {
4644 	hva_t hva;
4645 	struct kvm_arch_async_pf arch;
4646 
4647 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4648 		return false;
4649 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
4650 	    vcpu->arch.pfault_compare)
4651 		return false;
4652 	if (psw_extint_disabled(vcpu))
4653 		return false;
4654 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
4655 		return false;
4656 	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
4657 		return false;
4658 	if (!vcpu->arch.gmap->pfault_enabled)
4659 		return false;
4660 
4661 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
4662 	hva += current->thread.gmap_addr & ~PAGE_MASK;
4663 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
4664 		return false;
4665 
4666 	return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
4667 }
4668 
4669 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
4670 {
4671 	int rc, cpuflags;
4672 
4673 	/*
4674 	 * On s390 notifications for arriving pages will be delivered directly
4675 	 * to the guest, but the housekeeping for completed pfaults is
4676 	 * handled outside the worker.
4677 	 */
4678 	kvm_check_async_pf_completion(vcpu);
4679 
4680 	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
4681 	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
4682 
4683 	if (need_resched())
4684 		schedule();
4685 
4686 	if (!kvm_is_ucontrol(vcpu->kvm)) {
4687 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
4688 		if (rc || guestdbg_exit_pending(vcpu))
4689 			return rc;
4690 	}
4691 
4692 	rc = kvm_s390_handle_requests(vcpu);
4693 	if (rc)
4694 		return rc;
4695 
4696 	if (guestdbg_enabled(vcpu)) {
4697 		kvm_s390_backup_guest_per_regs(vcpu);
4698 		kvm_s390_patch_guest_per_regs(vcpu);
4699 	}
4700 
4701 	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
4702 
4703 	vcpu->arch.sie_block->icptcode = 0;
4704 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
4705 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
4706 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
4707 
4708 	return 0;
4709 }
4710 
4711 static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
4712 {
4713 	struct kvm_s390_pgm_info pgm_info = {
4714 		.code = PGM_ADDRESSING,
4715 	};
4716 	u8 opcode, ilen;
4717 	int rc;
4718 
4719 	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
4720 	trace_kvm_s390_sie_fault(vcpu);
4721 
4722 	/*
4723 	 * We want to inject an addressing exception, which is defined as a
4724 	 * suppressing or terminating exception. However, since we came here
4725 	 * by a DAT access exception, the PSW still points to the faulting
4726 	 * instruction since DAT exceptions are nullifying. So we've got
4727 	 * to look up the current opcode to get the length of the instruction
4728 	 * to be able to forward the PSW.
4729 	 */
4730 	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
4731 	ilen = insn_length(opcode);
4732 	if (rc < 0) {
4733 		return rc;
4734 	} else if (rc) {
4735 		/* Instruction-Fetching Exceptions - we can't detect the ilen.
4736 		 * Forward by arbitrary ilc, injection will take care of
4737 		 * nullification if necessary.
4738 		 */
4739 		pgm_info = vcpu->arch.pgm;
4740 		ilen = 4;
4741 	}
4742 	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
4743 	kvm_s390_forward_psw(vcpu, ilen);
4744 	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
4745 }
4746 
4747 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
4748 {
4749 	struct mcck_volatile_info *mcck_info;
4750 	struct sie_page *sie_page;
4751 
4752 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
4753 		   vcpu->arch.sie_block->icptcode);
4754 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
4755 
4756 	if (guestdbg_enabled(vcpu))
4757 		kvm_s390_restore_guest_per_regs(vcpu);
4758 
4759 	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
4760 	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
4761 
4762 	if (exit_reason == -EINTR) {
4763 		VCPU_EVENT(vcpu, 3, "%s", "machine check");
4764 		sie_page = container_of(vcpu->arch.sie_block,
4765 					struct sie_page, sie_block);
4766 		mcck_info = &sie_page->mcck_info;
4767 		kvm_s390_reinject_machine_check(vcpu, mcck_info);
4768 		return 0;
4769 	}
4770 
4771 	if (vcpu->arch.sie_block->icptcode > 0) {
4772 		int rc = kvm_handle_sie_intercept(vcpu);
4773 
4774 		if (rc != -EOPNOTSUPP)
4775 			return rc;
4776 		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
4777 		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
4778 		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
4779 		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
4780 		return -EREMOTE;
4781 	} else if (exit_reason != -EFAULT) {
4782 		vcpu->stat.exit_null++;
4783 		return 0;
4784 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
4785 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
4786 		vcpu->run->s390_ucontrol.trans_exc_code =
4787 						current->thread.gmap_addr;
4788 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
4789 		return -EREMOTE;
4790 	} else if (current->thread.gmap_pfault) {
4791 		trace_kvm_s390_major_guest_pfault(vcpu);
4792 		current->thread.gmap_pfault = 0;
4793 		if (kvm_arch_setup_async_pf(vcpu))
4794 			return 0;
4795 		vcpu->stat.pfault_sync++;
4796 		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
4797 	}
4798 	return vcpu_post_run_fault_in_sie(vcpu);
4799 }
4800 
4801 #define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
4802 static int __vcpu_run(struct kvm_vcpu *vcpu)
4803 {
4804 	int rc, exit_reason;
4805 	struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
4806 
4807 	/*
4808 	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
4809 	 * ning the guest), so that memslots (and other stuff) are protected
4810 	 */
4811 	kvm_vcpu_srcu_read_lock(vcpu);
4812 
4813 	do {
4814 		rc = vcpu_pre_run(vcpu);
4815 		if (rc || guestdbg_exit_pending(vcpu))
4816 			break;
4817 
4818 		kvm_vcpu_srcu_read_unlock(vcpu);
4819 		/*
4820 		 * As PF_VCPU will be used in the fault handler, there must
4821 		 * be no uaccess between guest_enter and guest_exit.
4822 		 */
4823 		local_irq_disable();
4824 		guest_enter_irqoff();
4825 		__disable_cpu_timer_accounting(vcpu);
4826 		local_irq_enable();
4827 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4828 			memcpy(sie_page->pv_grregs,
4829 			       vcpu->run->s.regs.gprs,
4830 			       sizeof(sie_page->pv_grregs));
4831 		}
4832 		if (test_cpu_flag(CIF_FPU))
4833 			load_fpu_regs();
4834 		exit_reason = sie64a(vcpu->arch.sie_block,
4835 				     vcpu->run->s.regs.gprs);
4836 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4837 			memcpy(vcpu->run->s.regs.gprs,
4838 			       sie_page->pv_grregs,
4839 			       sizeof(sie_page->pv_grregs));
4840 			/*
4841 			 * We're not allowed to inject interrupts on intercepts
4842 			 * that leave the guest state in an "in-between" state
4843 			 * where the next SIE entry will do a continuation.
4844 			 * Fence interrupts in our "internal" PSW.
4845 			 */
4846 			if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
4847 			    vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
4848 				vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
4849 			}
4850 		}
4851 		local_irq_disable();
4852 		__enable_cpu_timer_accounting(vcpu);
4853 		guest_exit_irqoff();
4854 		local_irq_enable();
4855 		kvm_vcpu_srcu_read_lock(vcpu);
4856 
4857 		rc = vcpu_post_run(vcpu, exit_reason);
4858 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
4859 
4860 	kvm_vcpu_srcu_read_unlock(vcpu);
4861 	return rc;
4862 }
4863 
4864 static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
4865 {
4866 	struct kvm_run *kvm_run = vcpu->run;
4867 	struct runtime_instr_cb *riccb;
4868 	struct gs_cb *gscb;
4869 
4870 	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
4871 	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
4872 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
4873 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
4874 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4875 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
4876 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
4877 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
4878 	}
4879 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
4880 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
4881 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
4882 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
4883 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4884 			kvm_clear_async_pf_completion_queue(vcpu);
4885 	}
4886 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
4887 		vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
4888 		vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
4889 		VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc);
4890 	}
4891 	/*
4892 	 * If userspace sets the riccb (e.g. after migration) to a valid state,
4893 	 * we should enable RI here instead of doing the lazy enablement.
4894 	 */
4895 	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
4896 	    test_kvm_facility(vcpu->kvm, 64) &&
4897 	    riccb->v &&
4898 	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
4899 		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
4900 		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
4901 	}
4902 	/*
4903 	 * If userspace sets the gscb (e.g. after migration) to non-zero,
4904 	 * we should enable GS here instead of doing the lazy enablement.
4905 	 */
4906 	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
4907 	    test_kvm_facility(vcpu->kvm, 133) &&
4908 	    gscb->gssm &&
4909 	    !vcpu->arch.gs_enabled) {
4910 		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
4911 		vcpu->arch.sie_block->ecb |= ECB_GS;
4912 		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
4913 		vcpu->arch.gs_enabled = 1;
4914 	}
4915 	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
4916 	    test_kvm_facility(vcpu->kvm, 82)) {
4917 		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4918 		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
4919 	}
4920 	if (MACHINE_HAS_GS) {
4921 		preempt_disable();
4922 		__ctl_set_bit(2, 4);
4923 		if (current->thread.gs_cb) {
4924 			vcpu->arch.host_gscb = current->thread.gs_cb;
4925 			save_gs_cb(vcpu->arch.host_gscb);
4926 		}
4927 		if (vcpu->arch.gs_enabled) {
4928 			current->thread.gs_cb = (struct gs_cb *)
4929 						&vcpu->run->s.regs.gscb;
4930 			restore_gs_cb(current->thread.gs_cb);
4931 		}
4932 		preempt_enable();
4933 	}
4934 	/* SIE will load etoken directly from SDNX and therefore kvm_run */
4935 }
4936 
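/*
 * A hedged userspace sketch of the eager RI enablement above (vcpu_fd
 * and the mmap'ed run struct are assumed to exist; riccb_data is a
 * hypothetical, previously saved control block with its valid bit set):
 *
 *	memcpy(run->s.regs.riccb, riccb_data, sizeof(run->s.regs.riccb));
 *	run->kvm_dirty_regs |= KVM_SYNC_RICCB;
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *
 * sync_regs_fmt2() then sets ECB3_RI before the next SIE entry rather
 * than waiting for the lazy enablement.
 */
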
4937 static void sync_regs(struct kvm_vcpu *vcpu)
4938 {
4939 	struct kvm_run *kvm_run = vcpu->run;
4940 
4941 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
4942 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
4943 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
4944 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
4945 		/* some control register changes require a tlb flush */
4946 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4947 	}
4948 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4949 		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
4950 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
4951 	}
4952 	save_access_regs(vcpu->arch.host_acrs);
4953 	restore_access_regs(vcpu->run->s.regs.acrs);
4954 	/* save host (userspace) fprs/vrs */
4955 	save_fpu_regs();
4956 	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
4957 	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
4958 	if (MACHINE_HAS_VX)
4959 		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
4960 	else
4961 		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
4962 	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
4963 	if (test_fp_ctl(current->thread.fpu.fpc))
4964 		/* User space provided an invalid FPC, let's clear it */
4965 		current->thread.fpu.fpc = 0;
4966 
4967 	/* Sync fmt2 only data */
4968 	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
4969 		sync_regs_fmt2(vcpu);
4970 	} else {
4971 		/*
4972 		 * In several places we have to modify our internal view to
4973 		 * not do things that are disallowed by the ultravisor. For
4974 		 * example we must not inject interrupts after specific exits
4975 		 * (e.g. 112 prefix page not secure). We do this by turning
4976 		 * off the machine check, external and I/O interrupt bits
4977 		 * of our PSW copy. To avoid getting validity intercepts, we
4978 		 * only accept the condition code from userspace.
4979 		 */
4980 		vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
4981 		vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
4982 						   PSW_MASK_CC;
4983 	}
4984 
4985 	kvm_run->kvm_dirty_regs = 0;
4986 }
4987 
4988 static void store_regs_fmt2(struct kvm_vcpu *vcpu)
4989 {
4990 	struct kvm_run *kvm_run = vcpu->run;
4991 
4992 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
4993 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
4994 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
4995 	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
4996 	kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
4997 	if (MACHINE_HAS_GS) {
4998 		preempt_disable();
4999 		__ctl_set_bit(2, 4);
5000 		if (vcpu->arch.gs_enabled)
5001 			save_gs_cb(current->thread.gs_cb);
5002 		current->thread.gs_cb = vcpu->arch.host_gscb;
5003 		restore_gs_cb(vcpu->arch.host_gscb);
5004 		if (!vcpu->arch.host_gscb)
5005 			__ctl_clear_bit(2, 4);
5006 		vcpu->arch.host_gscb = NULL;
5007 		preempt_enable();
5008 	}
5009 	/* SIE will save etoken directly into SDNX and therefore kvm_run */
5010 }
5011 
5012 static void store_regs(struct kvm_vcpu *vcpu)
5013 {
5014 	struct kvm_run *kvm_run = vcpu->run;
5015 
5016 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
5017 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
5018 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
5019 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
5020 	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
5021 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
5022 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
5023 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
5024 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
5025 	save_access_regs(vcpu->run->s.regs.acrs);
5026 	restore_access_regs(vcpu->arch.host_acrs);
5027 	/* Save guest register state */
5028 	save_fpu_regs();
5029 	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
5030 	/* Restore will be done lazily at return */
5031 	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
5032 	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
5033 	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
5034 		store_regs_fmt2(vcpu);
5035 }
5036 
5037 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
5038 {
5039 	struct kvm_run *kvm_run = vcpu->run;
5040 	int rc;
5041 
5042 	/*
5043 	 * Running a VM while dumping always has the potential to
5044 	 * produce inconsistent dump data. But for PV vcpus a SIE
5045 	 * entry while dumping could also lead to a fatal validity
5046 	 * intercept which we absolutely want to avoid.
5047 	 */
5048 	if (vcpu->kvm->arch.pv.dumping)
5049 		return -EINVAL;
5050 
5051 	if (kvm_run->immediate_exit)
5052 		return -EINTR;
5053 
5054 	if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
5055 	    kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
5056 		return -EINVAL;
5057 
5058 	vcpu_load(vcpu);
5059 
5060 	if (guestdbg_exit_pending(vcpu)) {
5061 		kvm_s390_prepare_debug_exit(vcpu);
5062 		rc = 0;
5063 		goto out;
5064 	}
5065 
5066 	kvm_sigset_activate(vcpu);
5067 
5068 	/*
5069 	 * There is no need to check the return value of vcpu_start: it can
5070 	 * only fail for protvirt, and protvirt implies user cpu state.
5071 	 */
5072 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
5073 		kvm_s390_vcpu_start(vcpu);
5074 	} else if (is_vcpu_stopped(vcpu)) {
5075 		pr_err_ratelimited("can't run stopped vcpu %d\n",
5076 				   vcpu->vcpu_id);
5077 		rc = -EINVAL;
5078 		goto out;
5079 	}
5080 
5081 	sync_regs(vcpu);
5082 	enable_cpu_timer_accounting(vcpu);
5083 
5084 	might_fault();
5085 	rc = __vcpu_run(vcpu);
5086 
5087 	if (signal_pending(current) && !rc) {
5088 		kvm_run->exit_reason = KVM_EXIT_INTR;
5089 		rc = -EINTR;
5090 	}
5091 
5092 	if (guestdbg_exit_pending(vcpu) && !rc)  {
5093 		kvm_s390_prepare_debug_exit(vcpu);
5094 		rc = 0;
5095 	}
5096 
5097 	if (rc == -EREMOTE) {
5098 		/* userspace support is needed, kvm_run has been prepared */
5099 		rc = 0;
5100 	}
5101 
5102 	disable_cpu_timer_accounting(vcpu);
5103 	store_regs(vcpu);
5104 
5105 	kvm_sigset_deactivate(vcpu);
5106 
5107 	vcpu->stat.exit_userspace++;
5108 out:
5109 	vcpu_put(vcpu);
5110 	return rc;
5111 }
5112 
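/*
 * A minimal, hedged sketch of the userspace side of this ioctl (vcpu_fd
 * comes from KVM_CREATE_VCPU, mmap_size from KVM_GET_VCPU_MMAP_SIZE):
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
 *			if (errno == EINTR)
 *				continue;
 *			break;
 *		}
 *		... handle run->exit_reason, e.g. KVM_EXIT_S390_SIEIC ...
 *	}
 */
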
5113 /*
5114  * store status at address
5115  * we have two special cases:
5116  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
5117  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
5118  */
5119 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
5120 {
5121 	unsigned char archmode = 1;
5122 	freg_t fprs[NUM_FPRS];
5123 	unsigned int px;
5124 	u64 clkcomp, cputm;
5125 	int rc;
5126 
5127 	px = kvm_s390_get_prefix(vcpu);
5128 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
5129 		if (write_guest_abs(vcpu, 163, &archmode, 1))
5130 			return -EFAULT;
5131 		gpa = 0;
5132 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
5133 		if (write_guest_real(vcpu, 163, &archmode, 1))
5134 			return -EFAULT;
5135 		gpa = px;
5136 	} else
5137 		gpa -= __LC_FPREGS_SAVE_AREA;
5138 
5139 	/* manually convert vector registers if necessary */
5140 	if (MACHINE_HAS_VX) {
5141 		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
5142 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
5143 				     fprs, 128);
5144 	} else {
5145 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
5146 				     vcpu->run->s.regs.fprs, 128);
5147 	}
5148 	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
5149 			      vcpu->run->s.regs.gprs, 128);
5150 	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
5151 			      &vcpu->arch.sie_block->gpsw, 16);
5152 	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
5153 			      &px, 4);
5154 	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
5155 			      &vcpu->run->s.regs.fpc, 4);
5156 	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
5157 			      &vcpu->arch.sie_block->todpr, 4);
5158 	cputm = kvm_s390_get_cpu_timer(vcpu);
5159 	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
5160 			      &cputm, 8);
5161 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
5162 	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
5163 			      &clkcomp, 8);
5164 	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
5165 			      &vcpu->run->s.regs.acrs, 64);
5166 	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
5167 			      &vcpu->arch.sie_block->gcr, 128);
5168 	return rc ? -EFAULT : 0;
5169 }
5170 
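/*
 * For reference, the fields written above and their sizes in bytes, at
 * the architected __LC_* save-area offsets: fprs 128, gprs 128, psw 16,
 * prefix 4, fpc 4, todpr 4, cpu timer 8, clock comparator 8 (the ckc
 * value is stored shifted right by 8), acrs 64, crs 128.
 */
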
5171 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
5172 {
5173 	/*
5174 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
5175 	 * switch in the run ioctl. Let's update our copies before we save
5176 	 * them into the save area.
5177 	 */
5178 	save_fpu_regs();
5179 	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
5180 	save_access_regs(vcpu->run->s.regs.acrs);
5181 
5182 	return kvm_s390_store_status_unloaded(vcpu, addr);
5183 }
5184 
5185 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
5186 {
5187 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
5188 	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
5189 }
5190 
5191 static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
5192 {
5193 	unsigned long i;
5194 	struct kvm_vcpu *vcpu;
5195 
5196 	kvm_for_each_vcpu(i, vcpu, kvm) {
5197 		__disable_ibs_on_vcpu(vcpu);
5198 	}
5199 }
5200 
5201 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
5202 {
5203 	if (!sclp.has_ibs)
5204 		return;
5205 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
5206 	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
5207 }
5208 
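/*
 * Note the request dance in the two helpers above: kvm_check_request()
 * consumes any still-pending request for the opposite IBS state before
 * the new one is synced, so ENABLE and DISABLE requests can never be
 * pending at the same time for one VCPU.
 */
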
5209 int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
5210 {
5211 	int i, online_vcpus, r = 0, started_vcpus = 0;
5212 
5213 	if (!is_vcpu_stopped(vcpu))
5214 		return 0;
5215 
5216 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
5217 	/* Only one cpu at a time may enter/leave the STOPPED state. */
5218 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
5219 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
5220 
5221 	/* Let's tell the UV that we want to change into the operating state */
5222 	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5223 		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
5224 		if (r) {
5225 			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5226 			return r;
5227 		}
5228 	}
5229 
5230 	for (i = 0; i < online_vcpus; i++) {
5231 		if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i)))
5232 			started_vcpus++;
5233 	}
5234 
5235 	if (started_vcpus == 0) {
5236 		/* we're the only active VCPU -> speed it up */
5237 		__enable_ibs_on_vcpu(vcpu);
5238 	} else if (started_vcpus == 1) {
5239 		/*
5240 		 * As we are starting a second VCPU, we have to disable
5241 		 * the IBS facility on all VCPUs to remove potentially
5242 		 * outstanding ENABLE requests.
5243 		 */
5244 		__disable_ibs_on_all_vcpus(vcpu->kvm);
5245 	}
5246 
5247 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
5248 	/*
5249 	 * The real PSW might have changed due to a RESTART interpreted by the
5250 	 * ultravisor. We block all interrupts and let the next sie exit
5251 	 * refresh our view.
5252 	 */
5253 	if (kvm_s390_pv_cpu_is_protected(vcpu))
5254 		vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
5255 	/*
5256 	 * Another VCPU might have used IBS while we were offline.
5257 	 * Let's play safe and flush the VCPU at startup.
5258 	 */
5259 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
5260 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5261 	return 0;
5262 }
5263 
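/*
 * In kvm_s390_vcpu_start() above, started_vcpus counts the VCPUs that
 * are already running (the caller itself is still STOPPED at that
 * point): 0 means we become the sole runner and may use IBS, 1 means a
 * second runner is appearing and IBS must be disabled everywhere.
 */
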
5264 int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
5265 {
5266 	int i, online_vcpus, r = 0, started_vcpus = 0;
5267 	struct kvm_vcpu *started_vcpu = NULL;
5268 
5269 	if (is_vcpu_stopped(vcpu))
5270 		return 0;
5271 
5272 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
5273 	/* Only one cpu at a time may enter/leave the STOPPED state. */
5274 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
5275 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
5276 
5277 	/* Let's tell the UV that we want to change into the stopped state */
5278 	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5279 		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
5280 		if (r) {
5281 			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5282 			return r;
5283 		}
5284 	}
5285 
5286 	/*
5287 	 * Set the VCPU to STOPPED and THEN clear the interrupt flag,
5288 	 * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders
5289 	 * have been fully processed. This will ensure that the VCPU
5290 	 * is kept BUSY if another VCPU is inquiring with SIGP SENSE.
5291 	 */
5292 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
5293 	kvm_s390_clear_stop_irq(vcpu);
5294 
5295 	__disable_ibs_on_vcpu(vcpu);
5296 
5297 	for (i = 0; i < online_vcpus; i++) {
5298 		struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i);
5299 
5300 		if (!is_vcpu_stopped(tmp)) {
5301 			started_vcpus++;
5302 			started_vcpu = tmp;
5303 		}
5304 	}
5305 
5306 	if (started_vcpus == 1) {
5307 		/*
5308 		 * As we only have one VCPU left, we want to enable the
5309 		 * IBS facility for that VCPU to speed it up.
5310 		 */
5311 		__enable_ibs_on_vcpu(started_vcpu);
5312 	}
5313 
5314 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5315 	return 0;
5316 }
5317 
5318 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
5319 				     struct kvm_enable_cap *cap)
5320 {
5321 	int r;
5322 
5323 	if (cap->flags)
5324 		return -EINVAL;
5325 
5326 	switch (cap->cap) {
5327 	case KVM_CAP_S390_CSS_SUPPORT:
5328 		if (!vcpu->kvm->arch.css_support) {
5329 			vcpu->kvm->arch.css_support = 1;
5330 			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
5331 			trace_kvm_s390_enable_css(vcpu->kvm);
5332 		}
5333 		r = 0;
5334 		break;
5335 	default:
5336 		r = -EINVAL;
5337 		break;
5338 	}
5339 	return r;
5340 }
5341 
5342 static long kvm_s390_vcpu_sida_op(struct kvm_vcpu *vcpu,
5343 				  struct kvm_s390_mem_op *mop)
5344 {
5345 	void __user *uaddr = (void __user *)mop->buf;
5346 	void *sida_addr;
5347 	int r = 0;
5348 
5349 	if (mop->flags || !mop->size)
5350 		return -EINVAL;
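	/* reject an unsigned wrap-around of size + sida_offset */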
5351 	if (mop->size + mop->sida_offset < mop->size)
5352 		return -EINVAL;
5353 	if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
5354 		return -E2BIG;
5355 	if (!kvm_s390_pv_cpu_is_protected(vcpu))
5356 		return -EINVAL;
5357 
5358 	sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset;
5359 
5360 	switch (mop->op) {
5361 	case KVM_S390_MEMOP_SIDA_READ:
5362 		if (copy_to_user(uaddr, sida_addr, mop->size))
5363 			r = -EFAULT;
5364 
5365 		break;
5366 	case KVM_S390_MEMOP_SIDA_WRITE:
5367 		if (copy_from_user(sida_addr, uaddr, mop->size))
5368 			r = -EFAULT;
5369 		break;
5370 	}
5371 	return r;
5372 }
5373 
5374 static long kvm_s390_vcpu_mem_op(struct kvm_vcpu *vcpu,
5375 				 struct kvm_s390_mem_op *mop)
5376 {
5377 	void __user *uaddr = (void __user *)mop->buf;
5378 	enum gacc_mode acc_mode;
5379 	void *tmpbuf = NULL;
5380 	int r;
5381 
5382 	r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_INJECT_EXCEPTION |
5383 					KVM_S390_MEMOP_F_CHECK_ONLY |
5384 					KVM_S390_MEMOP_F_SKEY_PROTECTION);
5385 	if (r)
5386 		return r;
5387 	if (mop->ar >= NUM_ACRS)
5388 		return -EINVAL;
5389 	if (kvm_s390_pv_cpu_is_protected(vcpu))
5390 		return -EINVAL;
5391 	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
5392 		tmpbuf = vmalloc(mop->size);
5393 		if (!tmpbuf)
5394 			return -ENOMEM;
5395 	}
5396 
5397 	acc_mode = mop->op == KVM_S390_MEMOP_LOGICAL_READ ? GACC_FETCH : GACC_STORE;
5398 	if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
5399 		r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size,
5400 				    acc_mode, mop->key);
5401 		goto out_inject;
5402 	}
5403 	if (acc_mode == GACC_FETCH) {
5404 		r = read_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
5405 					mop->size, mop->key);
5406 		if (r)
5407 			goto out_inject;
5408 		if (copy_to_user(uaddr, tmpbuf, mop->size)) {
5409 			r = -EFAULT;
5410 			goto out_free;
5411 		}
5412 	} else {
5413 		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
5414 			r = -EFAULT;
5415 			goto out_free;
5416 		}
5417 		r = write_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
5418 					 mop->size, mop->key);
5419 	}
5420 
5421 out_inject:
5422 	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
5423 		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
5424 
5425 out_free:
5426 	vfree(tmpbuf);
5427 	return r;
5428 }
5429 
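/*
 * A hedged userspace sketch of a logical read through the path above
 * (vcpu_fd and guest_addr are assumed, buf is a local buffer):
 *
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = guest_addr,
 *		.size = sizeof(buf),
 *		.op = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf = (__u64)(unsigned long)buf,
 *		.ar = 0,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
 */
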
5430 static long kvm_s390_vcpu_memsida_op(struct kvm_vcpu *vcpu,
5431 				     struct kvm_s390_mem_op *mop)
5432 {
5433 	int r, srcu_idx;
5434 
5435 	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
5436 
5437 	switch (mop->op) {
5438 	case KVM_S390_MEMOP_LOGICAL_READ:
5439 	case KVM_S390_MEMOP_LOGICAL_WRITE:
5440 		r = kvm_s390_vcpu_mem_op(vcpu, mop);
5441 		break;
5442 	case KVM_S390_MEMOP_SIDA_READ:
5443 	case KVM_S390_MEMOP_SIDA_WRITE:
5444 		/* we are locked against sida going away by the vcpu->mutex */
5445 		r = kvm_s390_vcpu_sida_op(vcpu, mop);
5446 		break;
5447 	default:
5448 		r = -EINVAL;
5449 	}
5450 
5451 	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
5452 	return r;
5453 }
5454 
5455 long kvm_arch_vcpu_async_ioctl(struct file *filp,
5456 			       unsigned int ioctl, unsigned long arg)
5457 {
5458 	struct kvm_vcpu *vcpu = filp->private_data;
5459 	void __user *argp = (void __user *)arg;
5460 	int rc;
5461 
5462 	switch (ioctl) {
5463 	case KVM_S390_IRQ: {
5464 		struct kvm_s390_irq s390irq;
5465 
5466 		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
5467 			return -EFAULT;
5468 		rc = kvm_s390_inject_vcpu(vcpu, &s390irq);
5469 		break;
5470 	}
5471 	case KVM_S390_INTERRUPT: {
5472 		struct kvm_s390_interrupt s390int;
5473 		struct kvm_s390_irq s390irq = {};
5474 
5475 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
5476 			return -EFAULT;
5477 		if (s390int_to_s390irq(&s390int, &s390irq))
5478 			return -EINVAL;
5479 		rc = kvm_s390_inject_vcpu(vcpu, &s390irq);
5480 		break;
5481 	}
5482 	default:
5483 		rc = -ENOIOCTLCMD;
5484 		break;
5485 	}
5486 
5487 	/*
5488 	 * To simplify single stepping of userspace-emulated instructions,
5489 	 * KVM_EXIT_S390_SIEIC exit sets KVM_GUESTDBG_EXIT_PENDING (see
5490 	 * should_handle_per_ifetch()). However, if userspace emulation injects
5491 	 * an interrupt, it needs to be cleared, so that KVM_EXIT_DEBUG happens
5492 	 * after (and not before) the interrupt delivery.
5493 	 */
5494 	if (!rc)
5495 		vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;
5496 
5497 	return rc;
5498 }
5499 
5500 static int kvm_s390_handle_pv_vcpu_dump(struct kvm_vcpu *vcpu,
5501 					struct kvm_pv_cmd *cmd)
5502 {
5503 	struct kvm_s390_pv_dmp dmp;
5504 	void *data;
5505 	int ret;
5506 
5507 	/* Dump initialization is a prerequisite */
5508 	if (!vcpu->kvm->arch.pv.dumping)
5509 		return -EINVAL;
5510 
5511 	if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp)))
5512 		return -EFAULT;
5513 
5514 	/* We only handle this subcmd right now */
5515 	if (dmp.subcmd != KVM_PV_DUMP_CPU)
5516 		return -EINVAL;
5517 
5518 	/* CPU dump length is the same as create cpu storage donation. */
5519 	if (dmp.buff_len != uv_info.guest_cpu_stor_len)
5520 		return -EINVAL;
5521 
5522 	data = kvzalloc(uv_info.guest_cpu_stor_len, GFP_KERNEL);
5523 	if (!data)
5524 		return -ENOMEM;
5525 
5526 	ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc);
5527 
5528 	VCPU_EVENT(vcpu, 3, "PROTVIRT DUMP CPU %d rc %x rrc %x",
5529 		   vcpu->vcpu_id, cmd->rc, cmd->rrc);
5530 
5531 	if (ret)
5532 		ret = -EINVAL;
5533 
5534 	/* On success copy over the dump data */
5535 	if (!ret && copy_to_user((__u8 __user *)dmp.buff_addr, data, uv_info.guest_cpu_stor_len))
5536 		ret = -EFAULT;
5537 
5538 	kvfree(data);
5539 	return ret;
5540 }
5541 
5542 long kvm_arch_vcpu_ioctl(struct file *filp,
5543 			 unsigned int ioctl, unsigned long arg)
5544 {
5545 	struct kvm_vcpu *vcpu = filp->private_data;
5546 	void __user *argp = (void __user *)arg;
5547 	int idx;
5548 	long r;
5549 	u16 rc, rrc;
5550 
5551 	vcpu_load(vcpu);
5552 
5553 	switch (ioctl) {
5554 	case KVM_S390_STORE_STATUS:
5555 		idx = srcu_read_lock(&vcpu->kvm->srcu);
5556 		r = kvm_s390_store_status_unloaded(vcpu, arg);
5557 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
5558 		break;
5559 	case KVM_S390_SET_INITIAL_PSW: {
5560 		psw_t psw;
5561 
5562 		r = -EFAULT;
5563 		if (copy_from_user(&psw, argp, sizeof(psw)))
5564 			break;
5565 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
5566 		break;
5567 	}
5568 	case KVM_S390_CLEAR_RESET:
5569 		r = 0;
5570 		kvm_arch_vcpu_ioctl_clear_reset(vcpu);
5571 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5572 			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
5573 					  UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
5574 			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
5575 				   rc, rrc);
5576 		}
5577 		break;
5578 	case KVM_S390_INITIAL_RESET:
5579 		r = 0;
5580 		kvm_arch_vcpu_ioctl_initial_reset(vcpu);
5581 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5582 			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
5583 					  UVC_CMD_CPU_RESET_INITIAL,
5584 					  &rc, &rrc);
5585 			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
5586 				   rc, rrc);
5587 		}
5588 		break;
5589 	case KVM_S390_NORMAL_RESET:
5590 		r = 0;
5591 		kvm_arch_vcpu_ioctl_normal_reset(vcpu);
5592 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5593 			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
5594 					  UVC_CMD_CPU_RESET, &rc, &rrc);
5595 			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
5596 				   rc, rrc);
5597 		}
5598 		break;
5599 	case KVM_SET_ONE_REG:
5600 	case KVM_GET_ONE_REG: {
5601 		struct kvm_one_reg reg;
5602 		r = -EINVAL;
5603 		if (kvm_s390_pv_cpu_is_protected(vcpu))
5604 			break;
5605 		r = -EFAULT;
5606 		if (copy_from_user(&reg, argp, sizeof(reg)))
5607 			break;
5608 		if (ioctl == KVM_SET_ONE_REG)
5609 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
5610 		else
5611 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
5612 		break;
5613 	}
5614 #ifdef CONFIG_KVM_S390_UCONTROL
5615 	case KVM_S390_UCAS_MAP: {
5616 		struct kvm_s390_ucas_mapping ucasmap;
5617 
5618 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
5619 			r = -EFAULT;
5620 			break;
5621 		}
5622 
5623 		if (!kvm_is_ucontrol(vcpu->kvm)) {
5624 			r = -EINVAL;
5625 			break;
5626 		}
5627 
5628 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
5629 				     ucasmap.vcpu_addr, ucasmap.length);
5630 		break;
5631 	}
5632 	case KVM_S390_UCAS_UNMAP: {
5633 		struct kvm_s390_ucas_mapping ucasmap;
5634 
5635 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
5636 			r = -EFAULT;
5637 			break;
5638 		}
5639 
5640 		if (!kvm_is_ucontrol(vcpu->kvm)) {
5641 			r = -EINVAL;
5642 			break;
5643 		}
5644 
5645 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
5646 			ucasmap.length);
5647 		break;
5648 	}
5649 #endif
5650 	case KVM_S390_VCPU_FAULT: {
5651 		r = gmap_fault(vcpu->arch.gmap, arg, 0);
5652 		break;
5653 	}
5654 	case KVM_ENABLE_CAP:
5655 	{
5656 		struct kvm_enable_cap cap;
5657 		r = -EFAULT;
5658 		if (copy_from_user(&cap, argp, sizeof(cap)))
5659 			break;
5660 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
5661 		break;
5662 	}
5663 	case KVM_S390_MEM_OP: {
5664 		struct kvm_s390_mem_op mem_op;
5665 
5666 		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
5667 			r = kvm_s390_vcpu_memsida_op(vcpu, &mem_op);
5668 		else
5669 			r = -EFAULT;
5670 		break;
5671 	}
5672 	case KVM_S390_SET_IRQ_STATE: {
5673 		struct kvm_s390_irq_state irq_state;
5674 
5675 		r = -EFAULT;
5676 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
5677 			break;
5678 		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
5679 		    irq_state.len == 0 ||
5680 		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
5681 			r = -EINVAL;
5682 			break;
5683 		}
5684 		/* do not use irq_state.flags, it will break old QEMUs */
5685 		r = kvm_s390_set_irq_state(vcpu,
5686 					   (void __user *) irq_state.buf,
5687 					   irq_state.len);
5688 		break;
5689 	}
5690 	case KVM_S390_GET_IRQ_STATE: {
5691 		struct kvm_s390_irq_state irq_state;
5692 
5693 		r = -EFAULT;
5694 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
5695 			break;
5696 		if (irq_state.len == 0) {
5697 			r = -EINVAL;
5698 			break;
5699 		}
5700 		/* do not use irq_state.flags, it will break old QEMUs */
5701 		r = kvm_s390_get_irq_state(vcpu,
5702 					   (__u8 __user *)  irq_state.buf,
5703 					   irq_state.len);
5704 		break;
5705 	}
5706 	case KVM_S390_PV_CPU_COMMAND: {
5707 		struct kvm_pv_cmd cmd;
5708 
5709 		r = -EINVAL;
5710 		if (!is_prot_virt_host())
5711 			break;
5712 
5713 		r = -EFAULT;
5714 		if (copy_from_user(&cmd, argp, sizeof(cmd)))
5715 			break;
5716 
5717 		r = -EINVAL;
5718 		if (cmd.flags)
5719 			break;
5720 
5721 		/* We only handle this cmd right now */
5722 		if (cmd.cmd != KVM_PV_DUMP)
5723 			break;
5724 
5725 		r = kvm_s390_handle_pv_vcpu_dump(vcpu, &cmd);
5726 
5727 		/* Always copy over UV rc / rrc data */
5728 		if (copy_to_user((__u8 __user *)argp, &cmd.rc,
5729 				 sizeof(cmd.rc) + sizeof(cmd.rrc)))
5730 			r = -EFAULT;
5731 		break;
5732 	}
5733 	default:
5734 		r = -ENOTTY;
5735 	}
5736 
5737 	vcpu_put(vcpu);
5738 	return r;
5739 }
5740 
5741 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
5742 {
5743 #ifdef CONFIG_KVM_S390_UCONTROL
5744 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
5745 		 && (kvm_is_ucontrol(vcpu->kvm))) {
5746 		vmf->page = virt_to_page(vcpu->arch.sie_block);
5747 		get_page(vmf->page);
5748 		return 0;
5749 	}
5750 #endif
5751 	return VM_FAULT_SIGBUS;
5752 }
5753 
5754 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
5755 {
5756 	return true;
5757 }
5758 
5759 /* Section: memory related */
5760 int kvm_arch_prepare_memory_region(struct kvm *kvm,
5761 				   const struct kvm_memory_slot *old,
5762 				   struct kvm_memory_slot *new,
5763 				   enum kvm_mr_change change)
5764 {
5765 	gpa_t size;
5766 
5767 	/* When we are protected, we should not change the memory slots */
5768 	if (kvm_s390_pv_get_handle(kvm))
5769 		return -EINVAL;
5770 
5771 	if (change != KVM_MR_DELETE && change != KVM_MR_FLAGS_ONLY) {
5772 		/*
5773 		 * A few sanity checks. Memory slots have to start and end at a
5774 		 * segment boundary (1MB). The memory in userland may be fragmented
5775 		 * into various different vmas, and it is okay to mmap() and munmap()
5776 		 * stuff in this slot at any time after this call.
5777 		 */
5778 
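		/* 0xfffff masks the low 20 bits, i.e. a 1 MB alignment check */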
5779 		if (new->userspace_addr & 0xffffful)
5780 			return -EINVAL;
5781 
5782 		size = new->npages * PAGE_SIZE;
5783 		if (size & 0xffffful)
5784 			return -EINVAL;
5785 
5786 		if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
5787 			return -EINVAL;
5788 	}
5789 
5790 	if (!kvm->arch.migration_mode)
5791 		return 0;
5792 
5793 	/*
5794 	 * Turn off migration mode when:
5795 	 * - userspace creates a new memslot with dirty logging off,
5796 	 * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and
5797 	 *   dirty logging is turned off.
5798 	 * Migration mode expects dirty page logging being enabled to store
5799 	 * Migration mode expects dirty page logging to be enabled so it can
5800 	 * store its dirty bitmap.
5801 	if (change != KVM_MR_DELETE &&
5802 	    !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
5803 		WARN(kvm_s390_vm_stop_migration(kvm),
5804 		     "Failed to stop migration mode");
5805 
5806 	return 0;
5807 }
5808 
5809 void kvm_arch_commit_memory_region(struct kvm *kvm,
5810 				struct kvm_memory_slot *old,
5811 				const struct kvm_memory_slot *new,
5812 				enum kvm_mr_change change)
5813 {
5814 	int rc = 0;
5815 
5816 	switch (change) {
5817 	case KVM_MR_DELETE:
5818 		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
5819 					old->npages * PAGE_SIZE);
5820 		break;
5821 	case KVM_MR_MOVE:
5822 		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
5823 					old->npages * PAGE_SIZE);
5824 		if (rc)
5825 			break;
5826 		fallthrough;
5827 	case KVM_MR_CREATE:
5828 		rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr,
5829 				      new->base_gfn * PAGE_SIZE,
5830 				      new->npages * PAGE_SIZE);
5831 		break;
5832 	case KVM_MR_FLAGS_ONLY:
5833 		break;
5834 	default:
5835 		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
5836 	}
5837 	if (rc)
5838 		pr_warn("failed to commit memory region\n");
5839 	return;
5840 }
5841 
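/*
 * Note the fallthrough above: KVM_MR_MOVE is handled as an unmap of the
 * old range followed by the same gmap_map_segment() call that serves
 * KVM_MR_CREATE for the new range.
 */
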
5842 static inline unsigned long nonhyp_mask(int i)
5843 {
5844 	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
5845 
5846 	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
5847 }
5848 
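/*
 * A hedged reading of nonhyp_mask(): sclp.hmfai appears to pack 2-bit
 * codes, one per facility-list doubleword i. "<< i * 2" followed by
 * ">> 30" extracts code i from the top of the word, and each code step
 * shifts the 48-bit base mask right by 16 bits, so code 0 keeps all 48
 * facility bits while code 3 keeps none.
 */
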
5849 static int __init kvm_s390_init(void)
5850 {
5851 	int i, r;
5852 
5853 	if (!sclp.has_sief2) {
5854 		pr_info("SIE is not available\n");
5855 		return -ENODEV;
5856 	}
5857 
5858 	if (nested && hpage) {
5859 		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
5860 		return -EINVAL;
5861 	}
5862 
5863 	for (i = 0; i < 16; i++)
5864 		kvm_s390_fac_base[i] |=
5865 			stfle_fac_list[i] & nonhyp_mask(i);
5866 
5867 	r = __kvm_s390_init();
5868 	if (r)
5869 		return r;
5870 
5871 	r = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
5872 	if (r) {
5873 		__kvm_s390_exit();
5874 		return r;
5875 	}
5876 	return 0;
5877 }
5878 
5879 static void __exit kvm_s390_exit(void)
5880 {
5881 	kvm_exit();
5882 
5883 	__kvm_s390_exit();
5884 }
5885 
5886 module_init(kvm_s390_init);
5887 module_exit(kvm_s390_exit);
5888 
5889 /*
5890  * Enable autoloading of the kvm module.
5891  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
5892  * since x86 takes a different approach.
5893  */
5894 #include <linux/miscdevice.h>
5895 MODULE_ALIAS_MISCDEV(KVM_MINOR);
5896 MODULE_ALIAS("devname:kvm");
5897