xref: /openbmc/linux/arch/s390/kvm/kvm-s390.c (revision 6502a34cfd6695929086187f63fe670cc3050e68)
1b0c632dbSHeiko Carstens /*
2a53c8fabSHeiko Carstens  * hosting zSeries kernel virtual machines
3b0c632dbSHeiko Carstens  *
4628eb9b8SChristian Ehrhardt  * Copyright IBM Corp. 2008, 2009
5b0c632dbSHeiko Carstens  *
6b0c632dbSHeiko Carstens  * This program is free software; you can redistribute it and/or modify
7b0c632dbSHeiko Carstens  * it under the terms of the GNU General Public License (version 2 only)
8b0c632dbSHeiko Carstens  * as published by the Free Software Foundation.
9b0c632dbSHeiko Carstens  *
10b0c632dbSHeiko Carstens  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11b0c632dbSHeiko Carstens  *               Christian Borntraeger <borntraeger@de.ibm.com>
12b0c632dbSHeiko Carstens  *               Heiko Carstens <heiko.carstens@de.ibm.com>
13628eb9b8SChristian Ehrhardt  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
1415f36ebdSJason J. Herne  *               Jason J. Herne <jjherne@us.ibm.com>
15b0c632dbSHeiko Carstens  */
16b0c632dbSHeiko Carstens 
17b0c632dbSHeiko Carstens #include <linux/compiler.h>
18b0c632dbSHeiko Carstens #include <linux/err.h>
19b0c632dbSHeiko Carstens #include <linux/fs.h>
20ca872302SChristian Borntraeger #include <linux/hrtimer.h>
21b0c632dbSHeiko Carstens #include <linux/init.h>
22b0c632dbSHeiko Carstens #include <linux/kvm.h>
23b0c632dbSHeiko Carstens #include <linux/kvm_host.h>
24b2d73b2aSMartin Schwidefsky #include <linux/mman.h>
25b0c632dbSHeiko Carstens #include <linux/module.h>
26a374e892STony Krowiak #include <linux/random.h>
27b0c632dbSHeiko Carstens #include <linux/slab.h>
28ba5c1e9bSCarsten Otte #include <linux/timer.h>
2941408c28SThomas Huth #include <linux/vmalloc.h>
3015c9705fSDavid Hildenbrand #include <linux/bitmap.h>
31cbb870c8SHeiko Carstens #include <asm/asm-offsets.h>
32b0c632dbSHeiko Carstens #include <asm/lowcore.h>
33fdf03650SFan Zhang #include <asm/etr.h>
34b0c632dbSHeiko Carstens #include <asm/pgtable.h>
351e133ab2SMartin Schwidefsky #include <asm/gmap.h>
36f5daba1dSHeiko Carstens #include <asm/nmi.h>
37a0616cdeSDavid Howells #include <asm/switch_to.h>
386d3da241SJens Freimann #include <asm/isc.h>
391526bf9cSChristian Borntraeger #include <asm/sclp.h>
400a763c78SDavid Hildenbrand #include <asm/cpacf.h>
410a763c78SDavid Hildenbrand #include <asm/etr.h>
428f2abe6aSChristian Borntraeger #include "kvm-s390.h"
43b0c632dbSHeiko Carstens #include "gaccess.h"
44b0c632dbSHeiko Carstens 
45ea2cdd27SDavid Hildenbrand #define KMSG_COMPONENT "kvm-s390"
46ea2cdd27SDavid Hildenbrand #undef pr_fmt
47ea2cdd27SDavid Hildenbrand #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
48ea2cdd27SDavid Hildenbrand 
495786fffaSCornelia Huck #define CREATE_TRACE_POINTS
505786fffaSCornelia Huck #include "trace.h"
51ade38c31SCornelia Huck #include "trace-s390.h"
525786fffaSCornelia Huck 
5341408c28SThomas Huth #define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
54816c7667SJens Freimann #define LOCAL_IRQS 32
55816c7667SJens Freimann #define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
56816c7667SJens Freimann 			   (KVM_MAX_VCPUS + LOCAL_IRQS))
5741408c28SThomas Huth 
58b0c632dbSHeiko Carstens #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
59b0c632dbSHeiko Carstens 
/*
 * Statistics exported via debugfs (one file per counter below
 * <debugfs>/kvm/).  Each entry maps a file name to the offset of the
 * corresponding per-VCPU counter inside struct kvm_vcpu (VCPU_STAT).
 */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }	/* sentinel: terminates the table for the generic stats code */
};
127b0c632dbSHeiko Carstens 
/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;	/* off by default; must be opted in at module load */
module_param(nested, int, S_IRUGO);	/* read-only sysfs visibility */
MODULE_PARM_DESC(nested, "Nested virtualization support");
132a411edf1SDavid Hildenbrand 
/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[16] = {
	/* each 64-bit word masks one block of the facility list;
	 * presumably word 0 covers facility bits 0-63, word 1 bits 64-127
	 * -- confirm against the architecture's STFLE layout */
	0xffe6000000000000UL,
	0x005e000000000000UL,
};
138b0c632dbSHeiko Carstens 
/*
 * Return the number of 64-bit words in kvm_s390_fac_list_mask.
 * The BUILD_BUG_ON ensures the mask array never grows beyond the
 * architectural facility mask size.
 */
unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}
14478c4b59fSMichael Mueller 
/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

/* pte notifiers: one for regular guest mappings, one for vSIE shadows */
static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
/* s390 debug feature area for this module (see kvm_arch_init) */
debug_info_t *kvm_s390_dbf;
1539d8d5786SMichael Mueller 
154b0c632dbSHeiko Carstens /* Section: not file related */
/*
 * Per-CPU hardware enablement hook.
 *
 * s390 CPUs can always execute SIE, so there is nothing to switch on;
 * this callback unconditionally reports success.
 */
int kvm_arch_hardware_enable(void)
{
	return 0;
}
160b0c632dbSHeiko Carstens 
161414d3b07SMartin Schwidefsky static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
162414d3b07SMartin Schwidefsky 			      unsigned long end);
1632c70fe44SChristian Borntraeger 
/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;	/* host TOD delta passed by the chain */

	list_for_each_entry(kvm, &vm_list, vm_list) {
		/* shift the epoch so the guest-visible TOD stays unchanged */
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			/* keep CPU-timer accounting consistent with the shift */
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			/* a nested (vSIE) guest has its own epoch to fix up */
			if (vcpu->arch.vsie_block)
				vcpu->arch.vsie_block->epoch -= *delta;
		}
	}
	return NOTIFY_OK;
}
190fdf03650SFan Zhang 
/* notifier block hooked into the s390 epoch-delta chain (TOD steering) */
static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};
194fdf03650SFan Zhang 
/* Register the notifiers KVM needs: gmap pte invalidation and TOD steering. */
int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	/* separate notifier for shadow gmaps used by the vSIE code */
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	/* follow host clock steering so guest epochs stay consistent */
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}
205b0c632dbSHeiko Carstens 
/* Undo the registrations done in kvm_arch_hardware_setup(). */
void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}
213b0c632dbSHeiko Carstens 
/* Mark cpu feature @nr (KVM_S390_VM_CPU_FEAT_*) as available to guests. */
static void allow_cpu_feat(unsigned long nr)
{
	/* set_bit_inv: the feature bitmap uses MSB-0 bit numbering */
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}
21822be5a13SDavid Hildenbrand 
/*
 * Test availability of a PERFORM LOCKED OPERATION subfunction.
 * Returns nonzero if subfunction @nr is installed (condition code 0).
 */
static inline int plo_test_bit(unsigned char nr)
{
	/* 0x100 selects PLO's "test bit" mode for function code @nr */
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc = 3; /* subfunction not available */

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}
2340a763c78SDavid Hildenbrand 
/*
 * Probe the host for cpu features and query/"test bit" subfunctions
 * (PLO, PTFF, CPACF) and record what can be offered to guests.
 */
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	/* probe all 256 possible PLO subfunction codes */
	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		etr_ptff(kvm_s390_available_subfunc.ptff, ETR_PTFF_QAF);

	/* query installed crypto (CPACF) subfunctions per MSA level */
	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PPNO, kvm_s390_available_subfunc.ppno);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	/* everything below only matters when nested (vSIE) is possible */
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}
30822be5a13SDavid Hildenbrand 
309b0c632dbSHeiko Carstens int kvm_arch_init(void *opaque)
310b0c632dbSHeiko Carstens {
31178f26131SChristian Borntraeger 	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
31278f26131SChristian Borntraeger 	if (!kvm_s390_dbf)
31378f26131SChristian Borntraeger 		return -ENOMEM;
31478f26131SChristian Borntraeger 
31578f26131SChristian Borntraeger 	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
31678f26131SChristian Borntraeger 		debug_unregister(kvm_s390_dbf);
31778f26131SChristian Borntraeger 		return -ENOMEM;
31878f26131SChristian Borntraeger 	}
31978f26131SChristian Borntraeger 
32022be5a13SDavid Hildenbrand 	kvm_s390_cpu_feat_init();
32122be5a13SDavid Hildenbrand 
32284877d93SCornelia Huck 	/* Register floating interrupt controller interface. */
32384877d93SCornelia Huck 	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
324b0c632dbSHeiko Carstens }
325b0c632dbSHeiko Carstens 
/* Module/arch teardown: release the kvm-s390 debug feature area. */
void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}
33078f26131SChristian Borntraeger 
331b0c632dbSHeiko Carstens /* Section: device related */
332b0c632dbSHeiko Carstens long kvm_arch_dev_ioctl(struct file *filp,
333b0c632dbSHeiko Carstens 			unsigned int ioctl, unsigned long arg)
334b0c632dbSHeiko Carstens {
335b0c632dbSHeiko Carstens 	if (ioctl == KVM_S390_ENABLE_SIE)
336b0c632dbSHeiko Carstens 		return s390_enable_sie();
337b0c632dbSHeiko Carstens 	return -EINVAL;
338b0c632dbSHeiko Carstens }
339b0c632dbSHeiko Carstens 
/*
 * KVM_CHECK_EXTENSION handler: report whether (and to what degree) a
 * capability is supported.  Returns 0 for unknown extensions, 1 for
 * plainly supported ones and a capability-specific value otherwise.
 */
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	/* capabilities that are always available */
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		/* the return value doubles as the maximum transfer size */
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		/* the extended SCA raises the VCPU limit when usable */
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		/* runtime instrumentation requires facility 64 */
		r = test_facility(64);
		break;
	default:
		r = 0;
	}
	return r;
}
396b0c632dbSHeiko Carstens 
39715f36ebdSJason J. Herne static void kvm_s390_sync_dirty_log(struct kvm *kvm,
39815f36ebdSJason J. Herne 					struct kvm_memory_slot *memslot)
39915f36ebdSJason J. Herne {
40015f36ebdSJason J. Herne 	gfn_t cur_gfn, last_gfn;
40115f36ebdSJason J. Herne 	unsigned long address;
40215f36ebdSJason J. Herne 	struct gmap *gmap = kvm->arch.gmap;
40315f36ebdSJason J. Herne 
40415f36ebdSJason J. Herne 	/* Loop over all guest pages */
40515f36ebdSJason J. Herne 	last_gfn = memslot->base_gfn + memslot->npages;
40615f36ebdSJason J. Herne 	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
40715f36ebdSJason J. Herne 		address = gfn_to_hva_memslot(memslot, cur_gfn);
40815f36ebdSJason J. Herne 
4091e133ab2SMartin Schwidefsky 		if (test_and_clear_guest_dirty(gmap->mm, address))
41015f36ebdSJason J. Herne 			mark_page_dirty(kvm, cur_gfn);
4111763f8d0SChristian Borntraeger 		if (fatal_signal_pending(current))
4121763f8d0SChristian Borntraeger 			return;
41370c88a00SChristian Borntraeger 		cond_resched();
41415f36ebdSJason J. Herne 	}
41515f36ebdSJason J. Herne }
41615f36ebdSJason J. Herne 
417b0c632dbSHeiko Carstens /* Section: vm related */
418a6e2f683SEugene (jno) Dvurechenski static void sca_del_vcpu(struct kvm_vcpu *vcpu);
419a6e2f683SEugene (jno) Dvurechenski 
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	/* serialize against memslot changes and concurrent log readers */
	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)	/* dirty logging not enabled for slot */
		goto out;

	/* pull dirty bits from the gmap into the memslot bitmap first */
	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
459b0c632dbSHeiko Carstens 
460*6502a34cSDavid Hildenbrand static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
461*6502a34cSDavid Hildenbrand {
462*6502a34cSDavid Hildenbrand 	unsigned int i;
463*6502a34cSDavid Hildenbrand 	struct kvm_vcpu *vcpu;
464*6502a34cSDavid Hildenbrand 
465*6502a34cSDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
466*6502a34cSDavid Hildenbrand 		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
467*6502a34cSDavid Hildenbrand 	}
468*6502a34cSDavid Hildenbrand }
469*6502a34cSDavid Hildenbrand 
/*
 * KVM_ENABLE_CAP handler on the VM fd.  Returns 0 on success, -EINVAL
 * for unknown caps or nonzero flags, -EBUSY for caps that can only be
 * enabled before any VCPU has been created.
 */
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		/* kvm->lock protects created_vcpus and the facility lists */
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			/* too late: vcpus already exist without vector support */
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			/* facility 129: vector support */
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			/* facility 64: runtime instrumentation */
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		/* already-running vcpus must start intercepting instr 0x0000 too */
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
533d938dc55SCornelia Huck 
5348c0a7ce6SDominik Dingel static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
5358c0a7ce6SDominik Dingel {
5368c0a7ce6SDominik Dingel 	int ret;
5378c0a7ce6SDominik Dingel 
5388c0a7ce6SDominik Dingel 	switch (attr->attr) {
5398c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE:
5408c0a7ce6SDominik Dingel 		ret = 0;
541c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
542a3a92c31SDominik Dingel 			 kvm->arch.mem_limit);
543a3a92c31SDominik Dingel 		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
5448c0a7ce6SDominik Dingel 			ret = -EFAULT;
5458c0a7ce6SDominik Dingel 		break;
5468c0a7ce6SDominik Dingel 	default:
5478c0a7ce6SDominik Dingel 		ret = -ENXIO;
5488c0a7ce6SDominik Dingel 		break;
5498c0a7ce6SDominik Dingel 	}
5508c0a7ce6SDominik Dingel 	return ret;
5518c0a7ce6SDominik Dingel }
5528c0a7ce6SDominik Dingel 
/*
 * Handle the KVM_S390_VM_MEM_CTRL group of KVM_SET_DEVICE_ATTR on the
 * VM fd: enable CMMA, reset all CMMA states, or change the guest
 * memory limit (which replaces the guest address space).
 *
 * Returns 0 on success, -ENXIO for unknown or unsupported attributes,
 * -EBUSY if vcpus already exist, and -EFAULT/-EINVAL/-E2BIG/-ENOMEM
 * for the respective sub-errors.
 */
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		/* CMMA can only be enabled before the first vcpu is created */
		if (!kvm->created_vcpus) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		/* srcu protects the memslots walked by s390_reset_cmma */
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		/* ucontrol VMs manage their own gmaps; no kvm-owned limit */
		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		/* the limit may only change before the first vcpu exists */
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				/* swap in the new address space for the old one */
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
6354f718eabSDominik Dingel 
636a374e892STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
637a374e892STony Krowiak 
/*
 * Handle the KVM_S390_VM_CRYPTO attribute group: enable or disable
 * AES/DEA protected key wrapping.  Enabling generates fresh random
 * wrapping key masks in the crycb; disabling clears them.  Requires
 * the MSA extension 3 facility (76).  Afterwards every vcpu's crypto
 * setup is refreshed and the vcpu is kicked out of SIE so the new
 * crycb contents take effect.
 *
 * Returns 0 on success, -EINVAL without facility 76, -ENXIO for an
 * unknown attribute.
 */
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	/* propagate the new crycb settings to every vcpu */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
686a374e892STony Krowiak 
68772f25020SJason J. Herne static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
68872f25020SJason J. Herne {
68972f25020SJason J. Herne 	u8 gtod_high;
69072f25020SJason J. Herne 
69172f25020SJason J. Herne 	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
69272f25020SJason J. Herne 					   sizeof(gtod_high)))
69372f25020SJason J. Herne 		return -EFAULT;
69472f25020SJason J. Herne 
69572f25020SJason J. Herne 	if (gtod_high != 0)
69672f25020SJason J. Herne 		return -EINVAL;
69758c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
69872f25020SJason J. Herne 
69972f25020SJason J. Herne 	return 0;
70072f25020SJason J. Herne }
70172f25020SJason J. Herne 
70272f25020SJason J. Herne static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
70372f25020SJason J. Herne {
7045a3d883aSDavid Hildenbrand 	u64 gtod;
70572f25020SJason J. Herne 
70672f25020SJason J. Herne 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
70772f25020SJason J. Herne 		return -EFAULT;
70872f25020SJason J. Herne 
70925ed1675SDavid Hildenbrand 	kvm_s390_set_tod_clock(kvm, gtod);
71058c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
71172f25020SJason J. Herne 	return 0;
71272f25020SJason J. Herne }
71372f25020SJason J. Herne 
71472f25020SJason J. Herne static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
71572f25020SJason J. Herne {
71672f25020SJason J. Herne 	int ret;
71772f25020SJason J. Herne 
71872f25020SJason J. Herne 	if (attr->flags)
71972f25020SJason J. Herne 		return -EINVAL;
72072f25020SJason J. Herne 
72172f25020SJason J. Herne 	switch (attr->attr) {
72272f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
72372f25020SJason J. Herne 		ret = kvm_s390_set_tod_high(kvm, attr);
72472f25020SJason J. Herne 		break;
72572f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
72672f25020SJason J. Herne 		ret = kvm_s390_set_tod_low(kvm, attr);
72772f25020SJason J. Herne 		break;
72872f25020SJason J. Herne 	default:
72972f25020SJason J. Herne 		ret = -ENXIO;
73072f25020SJason J. Herne 		break;
73172f25020SJason J. Herne 	}
73272f25020SJason J. Herne 	return ret;
73372f25020SJason J. Herne }
73472f25020SJason J. Herne 
73572f25020SJason J. Herne static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
73672f25020SJason J. Herne {
73772f25020SJason J. Herne 	u8 gtod_high = 0;
73872f25020SJason J. Herne 
73972f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod_high,
74072f25020SJason J. Herne 					 sizeof(gtod_high)))
74172f25020SJason J. Herne 		return -EFAULT;
74258c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
74372f25020SJason J. Herne 
74472f25020SJason J. Herne 	return 0;
74572f25020SJason J. Herne }
74672f25020SJason J. Herne 
74772f25020SJason J. Herne static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
74872f25020SJason J. Herne {
7495a3d883aSDavid Hildenbrand 	u64 gtod;
75072f25020SJason J. Herne 
75160417fccSDavid Hildenbrand 	gtod = kvm_s390_get_tod_clock_fast(kvm);
75272f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
75372f25020SJason J. Herne 		return -EFAULT;
75458c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
75572f25020SJason J. Herne 
75672f25020SJason J. Herne 	return 0;
75772f25020SJason J. Herne }
75872f25020SJason J. Herne 
75972f25020SJason J. Herne static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
76072f25020SJason J. Herne {
76172f25020SJason J. Herne 	int ret;
76272f25020SJason J. Herne 
76372f25020SJason J. Herne 	if (attr->flags)
76472f25020SJason J. Herne 		return -EINVAL;
76572f25020SJason J. Herne 
76672f25020SJason J. Herne 	switch (attr->attr) {
76772f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
76872f25020SJason J. Herne 		ret = kvm_s390_get_tod_high(kvm, attr);
76972f25020SJason J. Herne 		break;
77072f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
77172f25020SJason J. Herne 		ret = kvm_s390_get_tod_low(kvm, attr);
77272f25020SJason J. Herne 		break;
77372f25020SJason J. Herne 	default:
77472f25020SJason J. Herne 		ret = -ENXIO;
77572f25020SJason J. Herne 		break;
77672f25020SJason J. Herne 	}
77772f25020SJason J. Herne 	return ret;
77872f25020SJason J. Herne }
77972f25020SJason J. Herne 
/*
 * KVM_S390_VM_CPU_PROCESSOR: configure the guest's CPU model (cpuid,
 * IBC value and facility list) from user space.  Only permitted while
 * no vcpu has been created yet.  The requested IBC value is clamped
 * into the range the machine supports (lowest_ibc..unblocked_ibc).
 *
 * Returns 0 on success, -EBUSY if vcpus exist, -ENOMEM or -EFAULT on
 * allocation/copy failures.
 */
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		/* lowest_ibc == 0 means the machine has no IBC support */
		if (lowest_ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}
818658b6edaSMichael Mueller 
81915c9705fSDavid Hildenbrand static int kvm_s390_set_processor_feat(struct kvm *kvm,
82015c9705fSDavid Hildenbrand 				       struct kvm_device_attr *attr)
82115c9705fSDavid Hildenbrand {
82215c9705fSDavid Hildenbrand 	struct kvm_s390_vm_cpu_feat data;
82315c9705fSDavid Hildenbrand 	int ret = -EBUSY;
82415c9705fSDavid Hildenbrand 
82515c9705fSDavid Hildenbrand 	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
82615c9705fSDavid Hildenbrand 		return -EFAULT;
82715c9705fSDavid Hildenbrand 	if (!bitmap_subset((unsigned long *) data.feat,
82815c9705fSDavid Hildenbrand 			   kvm_s390_available_cpu_feat,
82915c9705fSDavid Hildenbrand 			   KVM_S390_VM_CPU_FEAT_NR_BITS))
83015c9705fSDavid Hildenbrand 		return -EINVAL;
83115c9705fSDavid Hildenbrand 
83215c9705fSDavid Hildenbrand 	mutex_lock(&kvm->lock);
83315c9705fSDavid Hildenbrand 	if (!atomic_read(&kvm->online_vcpus)) {
83415c9705fSDavid Hildenbrand 		bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
83515c9705fSDavid Hildenbrand 			    KVM_S390_VM_CPU_FEAT_NR_BITS);
83615c9705fSDavid Hildenbrand 		ret = 0;
83715c9705fSDavid Hildenbrand 	}
83815c9705fSDavid Hildenbrand 	mutex_unlock(&kvm->lock);
83915c9705fSDavid Hildenbrand 	return ret;
84015c9705fSDavid Hildenbrand }
84115c9705fSDavid Hildenbrand 
8420a763c78SDavid Hildenbrand static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
8430a763c78SDavid Hildenbrand 					  struct kvm_device_attr *attr)
8440a763c78SDavid Hildenbrand {
8450a763c78SDavid Hildenbrand 	/*
8460a763c78SDavid Hildenbrand 	 * Once supported by kernel + hw, we have to store the subfunctions
8470a763c78SDavid Hildenbrand 	 * in kvm->arch and remember that user space configured them.
8480a763c78SDavid Hildenbrand 	 */
8490a763c78SDavid Hildenbrand 	return -ENXIO;
8500a763c78SDavid Hildenbrand }
8510a763c78SDavid Hildenbrand 
852658b6edaSMichael Mueller static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
853658b6edaSMichael Mueller {
854658b6edaSMichael Mueller 	int ret = -ENXIO;
855658b6edaSMichael Mueller 
856658b6edaSMichael Mueller 	switch (attr->attr) {
857658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
858658b6edaSMichael Mueller 		ret = kvm_s390_set_processor(kvm, attr);
859658b6edaSMichael Mueller 		break;
86015c9705fSDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
86115c9705fSDavid Hildenbrand 		ret = kvm_s390_set_processor_feat(kvm, attr);
86215c9705fSDavid Hildenbrand 		break;
8630a763c78SDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
8640a763c78SDavid Hildenbrand 		ret = kvm_s390_set_processor_subfunc(kvm, attr);
8650a763c78SDavid Hildenbrand 		break;
866658b6edaSMichael Mueller 	}
867658b6edaSMichael Mueller 	return ret;
868658b6edaSMichael Mueller }
869658b6edaSMichael Mueller 
870658b6edaSMichael Mueller static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
871658b6edaSMichael Mueller {
872658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_processor *proc;
873658b6edaSMichael Mueller 	int ret = 0;
874658b6edaSMichael Mueller 
875658b6edaSMichael Mueller 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
876658b6edaSMichael Mueller 	if (!proc) {
877658b6edaSMichael Mueller 		ret = -ENOMEM;
878658b6edaSMichael Mueller 		goto out;
879658b6edaSMichael Mueller 	}
8809bb0ec09SDavid Hildenbrand 	proc->cpuid = kvm->arch.model.cpuid;
881658b6edaSMichael Mueller 	proc->ibc = kvm->arch.model.ibc;
882c54f0d6aSDavid Hildenbrand 	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
883c54f0d6aSDavid Hildenbrand 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
884658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
885658b6edaSMichael Mueller 		ret = -EFAULT;
886658b6edaSMichael Mueller 	kfree(proc);
887658b6edaSMichael Mueller out:
888658b6edaSMichael Mueller 	return ret;
889658b6edaSMichael Mueller }
890658b6edaSMichael Mueller 
891658b6edaSMichael Mueller static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
892658b6edaSMichael Mueller {
893658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_machine *mach;
894658b6edaSMichael Mueller 	int ret = 0;
895658b6edaSMichael Mueller 
896658b6edaSMichael Mueller 	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
897658b6edaSMichael Mueller 	if (!mach) {
898658b6edaSMichael Mueller 		ret = -ENOMEM;
899658b6edaSMichael Mueller 		goto out;
900658b6edaSMichael Mueller 	}
901658b6edaSMichael Mueller 	get_cpu_id((struct cpuid *) &mach->cpuid);
90237c5f6c8SDavid Hildenbrand 	mach->ibc = sclp.ibc;
903c54f0d6aSDavid Hildenbrand 	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
904981467c9SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
905658b6edaSMichael Mueller 	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
90694422ee8SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
907658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
908658b6edaSMichael Mueller 		ret = -EFAULT;
909658b6edaSMichael Mueller 	kfree(mach);
910658b6edaSMichael Mueller out:
911658b6edaSMichael Mueller 	return ret;
912658b6edaSMichael Mueller }
913658b6edaSMichael Mueller 
91415c9705fSDavid Hildenbrand static int kvm_s390_get_processor_feat(struct kvm *kvm,
91515c9705fSDavid Hildenbrand 				       struct kvm_device_attr *attr)
91615c9705fSDavid Hildenbrand {
91715c9705fSDavid Hildenbrand 	struct kvm_s390_vm_cpu_feat data;
91815c9705fSDavid Hildenbrand 
91915c9705fSDavid Hildenbrand 	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
92015c9705fSDavid Hildenbrand 		    KVM_S390_VM_CPU_FEAT_NR_BITS);
92115c9705fSDavid Hildenbrand 	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
92215c9705fSDavid Hildenbrand 		return -EFAULT;
92315c9705fSDavid Hildenbrand 	return 0;
92415c9705fSDavid Hildenbrand }
92515c9705fSDavid Hildenbrand 
92615c9705fSDavid Hildenbrand static int kvm_s390_get_machine_feat(struct kvm *kvm,
92715c9705fSDavid Hildenbrand 				     struct kvm_device_attr *attr)
92815c9705fSDavid Hildenbrand {
92915c9705fSDavid Hildenbrand 	struct kvm_s390_vm_cpu_feat data;
93015c9705fSDavid Hildenbrand 
93115c9705fSDavid Hildenbrand 	bitmap_copy((unsigned long *) data.feat,
93215c9705fSDavid Hildenbrand 		    kvm_s390_available_cpu_feat,
93315c9705fSDavid Hildenbrand 		    KVM_S390_VM_CPU_FEAT_NR_BITS);
93415c9705fSDavid Hildenbrand 	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
93515c9705fSDavid Hildenbrand 		return -EFAULT;
93615c9705fSDavid Hildenbrand 	return 0;
93715c9705fSDavid Hildenbrand }
93815c9705fSDavid Hildenbrand 
9390a763c78SDavid Hildenbrand static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
9400a763c78SDavid Hildenbrand 					  struct kvm_device_attr *attr)
9410a763c78SDavid Hildenbrand {
9420a763c78SDavid Hildenbrand 	/*
9430a763c78SDavid Hildenbrand 	 * Once we can actually configure subfunctions (kernel + hw support),
9440a763c78SDavid Hildenbrand 	 * we have to check if they were already set by user space, if so copy
9450a763c78SDavid Hildenbrand 	 * them from kvm->arch.
9460a763c78SDavid Hildenbrand 	 */
9470a763c78SDavid Hildenbrand 	return -ENXIO;
9480a763c78SDavid Hildenbrand }
9490a763c78SDavid Hildenbrand 
9500a763c78SDavid Hildenbrand static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
9510a763c78SDavid Hildenbrand 					struct kvm_device_attr *attr)
9520a763c78SDavid Hildenbrand {
9530a763c78SDavid Hildenbrand 	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
9540a763c78SDavid Hildenbrand 	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
9550a763c78SDavid Hildenbrand 		return -EFAULT;
9560a763c78SDavid Hildenbrand 	return 0;
9570a763c78SDavid Hildenbrand }
958658b6edaSMichael Mueller static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
959658b6edaSMichael Mueller {
960658b6edaSMichael Mueller 	int ret = -ENXIO;
961658b6edaSMichael Mueller 
962658b6edaSMichael Mueller 	switch (attr->attr) {
963658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
964658b6edaSMichael Mueller 		ret = kvm_s390_get_processor(kvm, attr);
965658b6edaSMichael Mueller 		break;
966658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MACHINE:
967658b6edaSMichael Mueller 		ret = kvm_s390_get_machine(kvm, attr);
968658b6edaSMichael Mueller 		break;
96915c9705fSDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
97015c9705fSDavid Hildenbrand 		ret = kvm_s390_get_processor_feat(kvm, attr);
97115c9705fSDavid Hildenbrand 		break;
97215c9705fSDavid Hildenbrand 	case KVM_S390_VM_CPU_MACHINE_FEAT:
97315c9705fSDavid Hildenbrand 		ret = kvm_s390_get_machine_feat(kvm, attr);
97415c9705fSDavid Hildenbrand 		break;
9750a763c78SDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
9760a763c78SDavid Hildenbrand 		ret = kvm_s390_get_processor_subfunc(kvm, attr);
9770a763c78SDavid Hildenbrand 		break;
9780a763c78SDavid Hildenbrand 	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
9790a763c78SDavid Hildenbrand 		ret = kvm_s390_get_machine_subfunc(kvm, attr);
9800a763c78SDavid Hildenbrand 		break;
981658b6edaSMichael Mueller 	}
982658b6edaSMichael Mueller 	return ret;
983658b6edaSMichael Mueller }
984658b6edaSMichael Mueller 
985f2061656SDominik Dingel static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
986f2061656SDominik Dingel {
987f2061656SDominik Dingel 	int ret;
988f2061656SDominik Dingel 
989f2061656SDominik Dingel 	switch (attr->group) {
9904f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
9918c0a7ce6SDominik Dingel 		ret = kvm_s390_set_mem_control(kvm, attr);
9924f718eabSDominik Dingel 		break;
99372f25020SJason J. Herne 	case KVM_S390_VM_TOD:
99472f25020SJason J. Herne 		ret = kvm_s390_set_tod(kvm, attr);
99572f25020SJason J. Herne 		break;
996658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
997658b6edaSMichael Mueller 		ret = kvm_s390_set_cpu_model(kvm, attr);
998658b6edaSMichael Mueller 		break;
999a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO:
1000a374e892STony Krowiak 		ret = kvm_s390_vm_set_crypto(kvm, attr);
1001a374e892STony Krowiak 		break;
1002f2061656SDominik Dingel 	default:
1003f2061656SDominik Dingel 		ret = -ENXIO;
1004f2061656SDominik Dingel 		break;
1005f2061656SDominik Dingel 	}
1006f2061656SDominik Dingel 
1007f2061656SDominik Dingel 	return ret;
1008f2061656SDominik Dingel }
1009f2061656SDominik Dingel 
1010f2061656SDominik Dingel static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1011f2061656SDominik Dingel {
10128c0a7ce6SDominik Dingel 	int ret;
10138c0a7ce6SDominik Dingel 
10148c0a7ce6SDominik Dingel 	switch (attr->group) {
10158c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
10168c0a7ce6SDominik Dingel 		ret = kvm_s390_get_mem_control(kvm, attr);
10178c0a7ce6SDominik Dingel 		break;
101872f25020SJason J. Herne 	case KVM_S390_VM_TOD:
101972f25020SJason J. Herne 		ret = kvm_s390_get_tod(kvm, attr);
102072f25020SJason J. Herne 		break;
1021658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
1022658b6edaSMichael Mueller 		ret = kvm_s390_get_cpu_model(kvm, attr);
1023658b6edaSMichael Mueller 		break;
10248c0a7ce6SDominik Dingel 	default:
10258c0a7ce6SDominik Dingel 		ret = -ENXIO;
10268c0a7ce6SDominik Dingel 		break;
10278c0a7ce6SDominik Dingel 	}
10288c0a7ce6SDominik Dingel 
10298c0a7ce6SDominik Dingel 	return ret;
1030f2061656SDominik Dingel }
1031f2061656SDominik Dingel 
/*
 * KVM_HAS_DEVICE_ATTR on the VM fd: report with 0 which attribute
 * group/attribute combinations this host supports, -ENXIO otherwise.
 * Must be kept in sync with the set/get handlers above.
 */
static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			/* CMMA attributes depend on hardware support */
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
			ret = 0;
			break;
		/* configuring subfunctions is not supported yet */
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
1098f2061656SDominik Dingel 
/*
 * KVM_S390_GET_SKEYS: read the guest's storage keys for args->count
 * frames starting at args->start_gfn into a user buffer.
 *
 * Returns KVM_S390_GET_SKEYS_NONE if the guest does not use storage
 * keys, 0 on success, -EINVAL for bad flags/count, -ENOMEM on
 * allocation failure, or -EFAULT for an invalid guest address or a
 * bad user buffer.
 */
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	/* try kmalloc first, fall back to vmalloc for large buffers */
	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	/* mmap_sem protects the page table walk done for each key */
	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);

	/* copy to user space only if every key was read successfully */
	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}
114730ee2a98SJason J. Herne 
/*
 * KVM_S390_SET_SKEYS: set the guest's storage keys for args->count
 * frames starting at args->start_gfn from a user buffer.  Enables
 * storage key handling for the guest mm if not done yet.
 *
 * Returns 0 on success, -EINVAL for bad flags/count or a key with the
 * reserved low bit set, -ENOMEM on allocation failure, or -EFAULT for
 * an invalid guest address or a bad user buffer.
 */
static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	/* try kmalloc first, fall back to vmalloc for large buffers */
	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	/* mmap_sem protects the page table walk done for each key */
	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);
out:
	kvfree(keys);
	return r;
}
120330ee2a98SJason J. Herne 
/*
 * kvm_arch_vm_ioctl() - s390 dispatcher for VM-scope KVM ioctls.
 *
 * Copies the per-ioctl argument structure from user space and forwards
 * it to the matching handler. Each case sets r to -EFAULT before the
 * copy_from_user() so a failed copy falls through with that error.
 * Unknown ioctls return -ENOTTY per ioctl convention.
 */
1204b0c632dbSHeiko Carstens long kvm_arch_vm_ioctl(struct file *filp,
1205b0c632dbSHeiko Carstens 		       unsigned int ioctl, unsigned long arg)
1206b0c632dbSHeiko Carstens {
1207b0c632dbSHeiko Carstens 	struct kvm *kvm = filp->private_data;
1208b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
1209f2061656SDominik Dingel 	struct kvm_device_attr attr;
1210b0c632dbSHeiko Carstens 	int r;
1211b0c632dbSHeiko Carstens 
1212b0c632dbSHeiko Carstens 	switch (ioctl) {
1213ba5c1e9bSCarsten Otte 	case KVM_S390_INTERRUPT: {
1214ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
1215ba5c1e9bSCarsten Otte 
1216ba5c1e9bSCarsten Otte 		r = -EFAULT;
1217ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
1218ba5c1e9bSCarsten Otte 			break;
1219ba5c1e9bSCarsten Otte 		r = kvm_s390_inject_vm(kvm, &s390int);
1220ba5c1e9bSCarsten Otte 		break;
1221ba5c1e9bSCarsten Otte 	}
1222d938dc55SCornelia Huck 	case KVM_ENABLE_CAP: {
1223d938dc55SCornelia Huck 		struct kvm_enable_cap cap;
1224d938dc55SCornelia Huck 		r = -EFAULT;
1225d938dc55SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
1226d938dc55SCornelia Huck 			break;
1227d938dc55SCornelia Huck 		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
1228d938dc55SCornelia Huck 		break;
1229d938dc55SCornelia Huck 	}
	/* Only valid after the userspace irqchip has been enabled. */
123084223598SCornelia Huck 	case KVM_CREATE_IRQCHIP: {
123184223598SCornelia Huck 		struct kvm_irq_routing_entry routing;
123284223598SCornelia Huck 
123384223598SCornelia Huck 		r = -EINVAL;
123484223598SCornelia Huck 		if (kvm->arch.use_irqchip) {
123584223598SCornelia Huck 			/* Set up dummy routing. */
123684223598SCornelia Huck 			memset(&routing, 0, sizeof(routing));
1237152b2839SNicholas Krause 			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
123884223598SCornelia Huck 		}
123984223598SCornelia Huck 		break;
124084223598SCornelia Huck 	}
1241f2061656SDominik Dingel 	case KVM_SET_DEVICE_ATTR: {
1242f2061656SDominik Dingel 		r = -EFAULT;
1243f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1244f2061656SDominik Dingel 			break;
1245f2061656SDominik Dingel 		r = kvm_s390_vm_set_attr(kvm, &attr);
1246f2061656SDominik Dingel 		break;
1247f2061656SDominik Dingel 	}
1248f2061656SDominik Dingel 	case KVM_GET_DEVICE_ATTR: {
1249f2061656SDominik Dingel 		r = -EFAULT;
1250f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1251f2061656SDominik Dingel 			break;
1252f2061656SDominik Dingel 		r = kvm_s390_vm_get_attr(kvm, &attr);
1253f2061656SDominik Dingel 		break;
1254f2061656SDominik Dingel 	}
1255f2061656SDominik Dingel 	case KVM_HAS_DEVICE_ATTR: {
1256f2061656SDominik Dingel 		r = -EFAULT;
1257f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1258f2061656SDominik Dingel 			break;
1259f2061656SDominik Dingel 		r = kvm_s390_vm_has_attr(kvm, &attr);
1260f2061656SDominik Dingel 		break;
1261f2061656SDominik Dingel 	}
126230ee2a98SJason J. Herne 	case KVM_S390_GET_SKEYS: {
126330ee2a98SJason J. Herne 		struct kvm_s390_skeys args;
126430ee2a98SJason J. Herne 
126530ee2a98SJason J. Herne 		r = -EFAULT;
126630ee2a98SJason J. Herne 		if (copy_from_user(&args, argp,
126730ee2a98SJason J. Herne 				   sizeof(struct kvm_s390_skeys)))
126830ee2a98SJason J. Herne 			break;
126930ee2a98SJason J. Herne 		r = kvm_s390_get_skeys(kvm, &args);
127030ee2a98SJason J. Herne 		break;
127130ee2a98SJason J. Herne 	}
127230ee2a98SJason J. Herne 	case KVM_S390_SET_SKEYS: {
127330ee2a98SJason J. Herne 		struct kvm_s390_skeys args;
127430ee2a98SJason J. Herne 
127530ee2a98SJason J. Herne 		r = -EFAULT;
127630ee2a98SJason J. Herne 		if (copy_from_user(&args, argp,
127730ee2a98SJason J. Herne 				   sizeof(struct kvm_s390_skeys)))
127830ee2a98SJason J. Herne 			break;
127930ee2a98SJason J. Herne 		r = kvm_s390_set_skeys(kvm, &args);
128030ee2a98SJason J. Herne 		break;
128130ee2a98SJason J. Herne 	}
1282b0c632dbSHeiko Carstens 	default:
1283367e1319SAvi Kivity 		r = -ENOTTY;
1284b0c632dbSHeiko Carstens 	}
1285b0c632dbSHeiko Carstens 
1286b0c632dbSHeiko Carstens 	return r;
1287b0c632dbSHeiko Carstens }
1288b0c632dbSHeiko Carstens 
/*
 * kvm_s390_query_ap_config() - run PQAP(QCI) to query the AP
 * configuration into the 128-byte buffer @config.
 *
 * The asm loads the function code into r0 and the buffer address into
 * r2, executes the PQAP instruction (opcode 0xb2af emitted as .long),
 * and extracts the condition code via ipm/srl. The EX_TABLE entry maps
 * an exception on the instruction to label 1, leaving cc at its
 * initialized value 0 (buffer was pre-zeroed by memset).
 *
 * Returns the condition code of PQAP (0 on success).
 */
128945c9b47cSTony Krowiak static int kvm_s390_query_ap_config(u8 *config)
129045c9b47cSTony Krowiak {
129145c9b47cSTony Krowiak 	u32 fcn_code = 0x04000000UL;
129286044c8cSChristian Borntraeger 	u32 cc = 0;
129345c9b47cSTony Krowiak 
129486044c8cSChristian Borntraeger 	memset(config, 0, 128);
129545c9b47cSTony Krowiak 	asm volatile(
129645c9b47cSTony Krowiak 		"lgr 0,%1\n"
129745c9b47cSTony Krowiak 		"lgr 2,%2\n"
129845c9b47cSTony Krowiak 		".long 0xb2af0000\n"		/* PQAP(QCI) */
129986044c8cSChristian Borntraeger 		"0: ipm %0\n"
130045c9b47cSTony Krowiak 		"srl %0,28\n"
130186044c8cSChristian Borntraeger 		"1:\n"
130286044c8cSChristian Borntraeger 		EX_TABLE(0b, 1b)
130386044c8cSChristian Borntraeger 		: "+r" (cc)
130445c9b47cSTony Krowiak 		: "r" (fcn_code), "r" (config)
130545c9b47cSTony Krowiak 		: "cc", "0", "2", "memory"
130645c9b47cSTony Krowiak 	);
130745c9b47cSTony Krowiak 
130845c9b47cSTony Krowiak 	return cc;
130945c9b47cSTony Krowiak }
131045c9b47cSTony Krowiak 
/*
 * kvm_s390_apxa_installed() - check whether APXA is available.
 *
 * Only meaningful if facility 12 is installed (guarded by
 * test_facility(12)); in that case the AP configuration is queried and
 * bit 0x40 of config[0] is returned as the APXA indicator.
 * Returns 0 if the facility is absent or the query fails (the failure
 * is logged with the condition code).
 */
131145c9b47cSTony Krowiak static int kvm_s390_apxa_installed(void)
131245c9b47cSTony Krowiak {
131345c9b47cSTony Krowiak 	u8 config[128];
131445c9b47cSTony Krowiak 	int cc;
131545c9b47cSTony Krowiak 
1316a6aacc3fSHeiko Carstens 	if (test_facility(12)) {
131745c9b47cSTony Krowiak 		cc = kvm_s390_query_ap_config(config);
131845c9b47cSTony Krowiak 
131945c9b47cSTony Krowiak 		if (cc)
132045c9b47cSTony Krowiak 			pr_err("PQAP(QCI) failed with cc=%d", cc);
132145c9b47cSTony Krowiak 		else
132245c9b47cSTony Krowiak 			return config[0] & 0x40;
132345c9b47cSTony Krowiak 	}
132445c9b47cSTony Krowiak 
132545c9b47cSTony Krowiak 	return 0;
132645c9b47cSTony Krowiak }
132745c9b47cSTony Krowiak 
/*
 * kvm_s390_set_crycb_format() - initialize the crypto control block
 * designation (crycbd): store the crycb address and OR in the format
 * bit — CRYCB_FORMAT2 when APXA is installed, CRYCB_FORMAT1 otherwise.
 */
132845c9b47cSTony Krowiak static void kvm_s390_set_crycb_format(struct kvm *kvm)
132945c9b47cSTony Krowiak {
133045c9b47cSTony Krowiak 	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
133145c9b47cSTony Krowiak 
133245c9b47cSTony Krowiak 	if (kvm_s390_apxa_installed())
133345c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
133445c9b47cSTony Krowiak 	else
133545c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
133645c9b47cSTony Krowiak }
133745c9b47cSTony Krowiak 
/*
 * kvm_s390_get_initial_cpuid() - build the default guest CPU id from
 * the host id, with the version field forced to 0xff, returned packed
 * into a u64.
 */
13389bb0ec09SDavid Hildenbrand static u64 kvm_s390_get_initial_cpuid(void)
13399d8d5786SMichael Mueller {
13409bb0ec09SDavid Hildenbrand 	struct cpuid cpuid;
13419bb0ec09SDavid Hildenbrand 
13429bb0ec09SDavid Hildenbrand 	get_cpu_id(&cpuid);
13439bb0ec09SDavid Hildenbrand 	cpuid.version = 0xff;
13449bb0ec09SDavid Hildenbrand 	return *((u64 *) &cpuid);
13459d8d5786SMichael Mueller }
13469d8d5786SMichael Mueller 
/*
 * kvm_s390_crypto_init() - set up the per-VM crypto control block.
 *
 * No-op unless the VM has facility 76 (message-security-assist
 * extension 3 — presumably; verify against the facility list docs).
 * Points crycb into the preallocated sie_page2, selects the crycb
 * format, and enables AES/DEA protected-key functions by default with
 * freshly randomized wrapping-key masks.
 */
1347c54f0d6aSDavid Hildenbrand static void kvm_s390_crypto_init(struct kvm *kvm)
13485102ee87STony Krowiak {
13499d8d5786SMichael Mueller 	if (!test_kvm_facility(kvm, 76))
1350c54f0d6aSDavid Hildenbrand 		return;
13515102ee87STony Krowiak 
1352c54f0d6aSDavid Hildenbrand 	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
135345c9b47cSTony Krowiak 	kvm_s390_set_crycb_format(kvm);
13545102ee87STony Krowiak 
1355ed6f76b4STony Krowiak 	/* Enable AES/DEA protected key functions by default */
1356ed6f76b4STony Krowiak 	kvm->arch.crypto.aes_kw = 1;
1357ed6f76b4STony Krowiak 	kvm->arch.crypto.dea_kw = 1;
1358ed6f76b4STony Krowiak 	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1359ed6f76b4STony Krowiak 			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1360ed6f76b4STony Krowiak 	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1361ed6f76b4STony Krowiak 			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
13625102ee87STony Krowiak }
13635102ee87STony Krowiak 
/*
 * sca_dispose() - free the VM's system control area, using the
 * allocator that matches its type: extended SCAs were obtained with
 * alloc_pages_exact() (see sca_switch_to_extended()), basic SCAs with
 * get_zeroed_page(). Clears the pointer afterwards.
 */
13647d43bafcSEugene (jno) Dvurechenski static void sca_dispose(struct kvm *kvm)
13657d43bafcSEugene (jno) Dvurechenski {
13667d43bafcSEugene (jno) Dvurechenski 	if (kvm->arch.use_esca)
13675e044315SEugene (jno) Dvurechenski 		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
13687d43bafcSEugene (jno) Dvurechenski 	else
13697d43bafcSEugene (jno) Dvurechenski 		free_page((unsigned long)(kvm->arch.sca));
13707d43bafcSEugene (jno) Dvurechenski 	kvm->arch.sca = NULL;
13717d43bafcSEugene (jno) Dvurechenski }
13727d43bafcSEugene (jno) Dvurechenski 
/*
 * kvm_arch_init_vm() - s390 VM creation.
 *
 * Validates the requested @type (only KVM_VM_S390_UCONTROL is accepted,
 * and only with CAP_SYS_ADMIN when that config option is enabled),
 * enables SIE, and allocates/initializes the per-VM state: basic SCA,
 * debug feature area, sie_page2 (facility list + crycb), facility
 * mask/list, cpuid/ibc model data, crypto block, floating-interrupt
 * lists, and — for non-ucontrol VMs — the guest address space (gmap)
 * sized by the sclp-reported maximum.
 *
 * Returns 0 on success, -EINVAL for a bad type, -ENOMEM or the error
 * from s390_enable_sie() on failure; partially-created state is torn
 * down on the error path.
 */
1373e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
1374b0c632dbSHeiko Carstens {
137576a6dd72SDavid Hildenbrand 	gfp_t alloc_flags = GFP_KERNEL;
13769d8d5786SMichael Mueller 	int i, rc;
1377b0c632dbSHeiko Carstens 	char debug_name[16];
	/* shared across all VMs; protected by kvm_lock below */
1378f6c137ffSChristian Borntraeger 	static unsigned long sca_offset;
1379b0c632dbSHeiko Carstens 
1380e08b9637SCarsten Otte 	rc = -EINVAL;
1381e08b9637SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
1382e08b9637SCarsten Otte 	if (type & ~KVM_VM_S390_UCONTROL)
1383e08b9637SCarsten Otte 		goto out_err;
1384e08b9637SCarsten Otte 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
1385e08b9637SCarsten Otte 		goto out_err;
1386e08b9637SCarsten Otte #else
1387e08b9637SCarsten Otte 	if (type)
1388e08b9637SCarsten Otte 		goto out_err;
1389e08b9637SCarsten Otte #endif
1390e08b9637SCarsten Otte 
1391b0c632dbSHeiko Carstens 	rc = s390_enable_sie();
1392b0c632dbSHeiko Carstens 	if (rc)
1393d89f5effSJan Kiszka 		goto out_err;
1394b0c632dbSHeiko Carstens 
1395b290411aSCarsten Otte 	rc = -ENOMEM;
1396b290411aSCarsten Otte 
13977d0a5e62SJanosch Frank 	ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);
13987d0a5e62SJanosch Frank 
13997d43bafcSEugene (jno) Dvurechenski 	kvm->arch.use_esca = 0; /* start with basic SCA */
	/* Without the 64-bit SCA-origin facility the SCA must be below 2G. */
140076a6dd72SDavid Hildenbrand 	if (!sclp.has_64bscao)
140176a6dd72SDavid Hildenbrand 		alloc_flags |= GFP_DMA;
14025e044315SEugene (jno) Dvurechenski 	rwlock_init(&kvm->arch.sca_lock);
140376a6dd72SDavid Hildenbrand 	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
1404b0c632dbSHeiko Carstens 	if (!kvm->arch.sca)
1405d89f5effSJan Kiszka 		goto out_err;
	/*
	 * Stagger SCA placement within the page by 16 bytes per VM
	 * (wrapping when a bsca_block would no longer fit in the page).
	 */
1406f6c137ffSChristian Borntraeger 	spin_lock(&kvm_lock);
1407c5c2c393SDavid Hildenbrand 	sca_offset += 16;
1408bc784cceSEugene (jno) Dvurechenski 	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
1409c5c2c393SDavid Hildenbrand 		sca_offset = 0;
1410bc784cceSEugene (jno) Dvurechenski 	kvm->arch.sca = (struct bsca_block *)
1411bc784cceSEugene (jno) Dvurechenski 			((char *) kvm->arch.sca + sca_offset);
1412f6c137ffSChristian Borntraeger 	spin_unlock(&kvm_lock);
1413b0c632dbSHeiko Carstens 
1414b0c632dbSHeiko Carstens 	sprintf(debug_name, "kvm-%u", current->pid);
1415b0c632dbSHeiko Carstens 
14161cb9cf72SChristian Borntraeger 	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
1417b0c632dbSHeiko Carstens 	if (!kvm->arch.dbf)
141840f5b735SDominik Dingel 		goto out_err;
1419b0c632dbSHeiko Carstens 
1420c54f0d6aSDavid Hildenbrand 	kvm->arch.sie_page2 =
1421c54f0d6aSDavid Hildenbrand 	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1422c54f0d6aSDavid Hildenbrand 	if (!kvm->arch.sie_page2)
142340f5b735SDominik Dingel 		goto out_err;
14249d8d5786SMichael Mueller 
1425fb5bf93fSMichael Mueller 	/* Populate the facility mask initially. */
1426c54f0d6aSDavid Hildenbrand 	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
142794422ee8SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
14289d8d5786SMichael Mueller 	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
14299d8d5786SMichael Mueller 		if (i < kvm_s390_fac_list_mask_size())
1430c54f0d6aSDavid Hildenbrand 			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
14319d8d5786SMichael Mueller 		else
1432c54f0d6aSDavid Hildenbrand 			kvm->arch.model.fac_mask[i] = 0UL;
14339d8d5786SMichael Mueller 	}
14349d8d5786SMichael Mueller 
1435981467c9SMichael Mueller 	/* Populate the facility list initially. */
1436c54f0d6aSDavid Hildenbrand 	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
1437c54f0d6aSDavid Hildenbrand 	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
1438981467c9SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
1439981467c9SMichael Mueller 
	/* Facility 74 is always offered to the guest. */
144095ca2cb5SJanosch Frank 	set_kvm_facility(kvm->arch.model.fac_mask, 74);
144195ca2cb5SJanosch Frank 	set_kvm_facility(kvm->arch.model.fac_list, 74);
144295ca2cb5SJanosch Frank 
14439bb0ec09SDavid Hildenbrand 	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
144437c5f6c8SDavid Hildenbrand 	kvm->arch.model.ibc = sclp.ibc & 0x0fff;
14459d8d5786SMichael Mueller 
1446c54f0d6aSDavid Hildenbrand 	kvm_s390_crypto_init(kvm);
14475102ee87STony Krowiak 
1448ba5c1e9bSCarsten Otte 	spin_lock_init(&kvm->arch.float_int.lock);
14496d3da241SJens Freimann 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
14506d3da241SJens Freimann 		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
14518a242234SHeiko Carstens 	init_waitqueue_head(&kvm->arch.ipte_wq);
1452a6b7e459SThomas Huth 	mutex_init(&kvm->arch.ipte_mutex);
1453ba5c1e9bSCarsten Otte 
1454b0c632dbSHeiko Carstens 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
145578f26131SChristian Borntraeger 	VM_EVENT(kvm, 3, "vm created with type %lu", type);
1456b0c632dbSHeiko Carstens 
	/* ucontrol VMs manage their address space from user space: no gmap. */
1457e08b9637SCarsten Otte 	if (type & KVM_VM_S390_UCONTROL) {
1458e08b9637SCarsten Otte 		kvm->arch.gmap = NULL;
1459a3a92c31SDominik Dingel 		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
1460e08b9637SCarsten Otte 	} else {
146132e6b236SGuenther Hutzl 		if (sclp.hamax == U64_MAX)
1462a3a92c31SDominik Dingel 			kvm->arch.mem_limit = TASK_MAX_SIZE;
146332e6b236SGuenther Hutzl 		else
146432e6b236SGuenther Hutzl 			kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
146532e6b236SGuenther Hutzl 						    sclp.hamax + 1);
14666ea427bbSMartin Schwidefsky 		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
1467598841caSCarsten Otte 		if (!kvm->arch.gmap)
146840f5b735SDominik Dingel 			goto out_err;
14692c70fe44SChristian Borntraeger 		kvm->arch.gmap->private = kvm;
147024eb3a82SDominik Dingel 		kvm->arch.gmap->pfault_enabled = 0;
1471e08b9637SCarsten Otte 	}
1472fa6b7fe9SCornelia Huck 
1473fa6b7fe9SCornelia Huck 	kvm->arch.css_support = 0;
147484223598SCornelia Huck 	kvm->arch.use_irqchip = 0;
147572f25020SJason J. Herne 	kvm->arch.epoch = 0;
1476fa6b7fe9SCornelia Huck 
14778ad35755SDavid Hildenbrand 	spin_lock_init(&kvm->arch.start_stop_lock);
1478a3508fbeSDavid Hildenbrand 	kvm_s390_vsie_init(kvm);
14798335713aSChristian Borntraeger 	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
14808ad35755SDavid Hildenbrand 
1481d89f5effSJan Kiszka 	return 0;
1482d89f5effSJan Kiszka out_err:
	/* All of these tolerate partially/never initialized state. */
1483c54f0d6aSDavid Hildenbrand 	free_page((unsigned long)kvm->arch.sie_page2);
148440f5b735SDominik Dingel 	debug_unregister(kvm->arch.dbf);
14857d43bafcSEugene (jno) Dvurechenski 	sca_dispose(kvm);
148678f26131SChristian Borntraeger 	KVM_EVENT(3, "creation of vm failed: %d", rc);
1487d89f5effSJan Kiszka 	return rc;
1488b0c632dbSHeiko Carstens }
1489b0c632dbSHeiko Carstens 
/*
 * kvm_arch_vcpu_destroy() - tear down one vcpu: clear pending local
 * interrupts and async page faults, drop the vcpu's SCA entry (regular
 * VMs) or its per-vcpu gmap (ucontrol VMs), release CMMA state if in
 * use, and free the SIE control block and the vcpu structure itself.
 */
1490d329c035SChristian Borntraeger void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1491d329c035SChristian Borntraeger {
1492d329c035SChristian Borntraeger 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
1493ade38c31SCornelia Huck 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
149467335e63SChristian Borntraeger 	kvm_s390_clear_local_irqs(vcpu);
14953c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
1496bc784cceSEugene (jno) Dvurechenski 	if (!kvm_is_ucontrol(vcpu->kvm))
1497a6e2f683SEugene (jno) Dvurechenski 		sca_del_vcpu(vcpu);
149827e0393fSCarsten Otte 
149927e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm))
15006ea427bbSMartin Schwidefsky 		gmap_remove(vcpu->arch.gmap);
150127e0393fSCarsten Otte 
1502e6db1d61SDominik Dingel 	if (vcpu->kvm->arch.use_cmma)
1503b31605c1SDominik Dingel 		kvm_s390_vcpu_unsetup_cmma(vcpu);
1504d329c035SChristian Borntraeger 	free_page((unsigned long)(vcpu->arch.sie_block));
1505b31288faSKonstantin Weitz 
15066692cef3SChristian Borntraeger 	kvm_vcpu_uninit(vcpu);
1507b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
1508d329c035SChristian Borntraeger }
1509d329c035SChristian Borntraeger 
/*
 * kvm_free_vcpus() - destroy every vcpu of @kvm, then clear the vcpu
 * pointer array and the online count under kvm->lock.
 */
1510d329c035SChristian Borntraeger static void kvm_free_vcpus(struct kvm *kvm)
1511d329c035SChristian Borntraeger {
1512d329c035SChristian Borntraeger 	unsigned int i;
1513988a2caeSGleb Natapov 	struct kvm_vcpu *vcpu;
1514d329c035SChristian Borntraeger 
1515988a2caeSGleb Natapov 	kvm_for_each_vcpu(i, vcpu, kvm)
1516988a2caeSGleb Natapov 		kvm_arch_vcpu_destroy(vcpu);
1517988a2caeSGleb Natapov 
1518988a2caeSGleb Natapov 	mutex_lock(&kvm->lock);
1519988a2caeSGleb Natapov 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
1520d329c035SChristian Borntraeger 		kvm->vcpus[i] = NULL;
1521988a2caeSGleb Natapov 
1522988a2caeSGleb Natapov 	atomic_set(&kvm->online_vcpus, 0);
1523988a2caeSGleb Natapov 	mutex_unlock(&kvm->lock);
1524d329c035SChristian Borntraeger }
1525d329c035SChristian Borntraeger 
/*
 * kvm_arch_destroy_vm() - full VM teardown: free all vcpus, the SCA,
 * the debug feature area and sie_page2, the guest address space (for
 * non-ucontrol VMs), irq adapters, floating interrupts, and vsie state.
 */
1526b0c632dbSHeiko Carstens void kvm_arch_destroy_vm(struct kvm *kvm)
1527b0c632dbSHeiko Carstens {
1528d329c035SChristian Borntraeger 	kvm_free_vcpus(kvm);
15297d43bafcSEugene (jno) Dvurechenski 	sca_dispose(kvm);
1530d329c035SChristian Borntraeger 	debug_unregister(kvm->arch.dbf);
1531c54f0d6aSDavid Hildenbrand 	free_page((unsigned long)kvm->arch.sie_page2);
153227e0393fSCarsten Otte 	if (!kvm_is_ucontrol(kvm))
15336ea427bbSMartin Schwidefsky 		gmap_remove(kvm->arch.gmap);
1534841b91c5SCornelia Huck 	kvm_s390_destroy_adapters(kvm);
153567335e63SChristian Borntraeger 	kvm_s390_clear_float_irqs(kvm);
1536a3508fbeSDavid Hildenbrand 	kvm_s390_vsie_destroy(kvm);
15378335713aSChristian Borntraeger 	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
1538b0c632dbSHeiko Carstens }
1539b0c632dbSHeiko Carstens 
1540b0c632dbSHeiko Carstens /* Section: vcpu related */
/*
 * __kvm_ucontrol_vcpu_init() - ucontrol VMs get a private, unlimited
 * gmap per vcpu (regular VMs share the VM-wide gmap instead).
 * Returns 0 on success, -ENOMEM if gmap creation fails.
 */
1541dafd032aSDominik Dingel static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1542b0c632dbSHeiko Carstens {
15436ea427bbSMartin Schwidefsky 	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
154427e0393fSCarsten Otte 	if (!vcpu->arch.gmap)
154527e0393fSCarsten Otte 		return -ENOMEM;
15462c70fe44SChristian Borntraeger 	vcpu->arch.gmap->private = vcpu->kvm;
1547dafd032aSDominik Dingel 
154827e0393fSCarsten Otte 	return 0;
154927e0393fSCarsten Otte }
155027e0393fSCarsten Otte 
/*
 * sca_del_vcpu() - remove a vcpu from the VM's system control area:
 * clear its bit in the mcn bitmap and zero its SIE descriptor address,
 * handling both the extended (esca) and basic (bsca) layouts.
 * sca_lock is taken for reading: it only guards against a concurrent
 * bsca->esca switch, not against other per-entry updates.
 */
1551a6e2f683SEugene (jno) Dvurechenski static void sca_del_vcpu(struct kvm_vcpu *vcpu)
1552a6e2f683SEugene (jno) Dvurechenski {
15535e044315SEugene (jno) Dvurechenski 	read_lock(&vcpu->kvm->arch.sca_lock);
15547d43bafcSEugene (jno) Dvurechenski 	if (vcpu->kvm->arch.use_esca) {
15557d43bafcSEugene (jno) Dvurechenski 		struct esca_block *sca = vcpu->kvm->arch.sca;
15567d43bafcSEugene (jno) Dvurechenski 
15577d43bafcSEugene (jno) Dvurechenski 		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
15587d43bafcSEugene (jno) Dvurechenski 		sca->cpu[vcpu->vcpu_id].sda = 0;
15597d43bafcSEugene (jno) Dvurechenski 	} else {
1560bc784cceSEugene (jno) Dvurechenski 		struct bsca_block *sca = vcpu->kvm->arch.sca;
1561a6e2f683SEugene (jno) Dvurechenski 
1562a6e2f683SEugene (jno) Dvurechenski 		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
1563a6e2f683SEugene (jno) Dvurechenski 		sca->cpu[vcpu->vcpu_id].sda = 0;
1564a6e2f683SEugene (jno) Dvurechenski 	}
15655e044315SEugene (jno) Dvurechenski 	read_unlock(&vcpu->kvm->arch.sca_lock);
15667d43bafcSEugene (jno) Dvurechenski }
1567a6e2f683SEugene (jno) Dvurechenski 
/*
 * sca_add_vcpu() - register a vcpu in the VM's system control area:
 * store its SIE block address in the SCA entry, point the SIE control
 * block at the SCA (split into scaoh/scaol halves), and set the vcpu's
 * mcn bit. For the extended layout, ecb2 bit 0x04 is also set in the
 * SIE block — the same bit sca_switch_to_extended() sets, presumably
 * the ESCA-enable flag (confirm against the SIE block layout).
 * Runs under sca_lock (read) to exclude a concurrent bsca->esca switch.
 */
1568eaa78f34SDavid Hildenbrand static void sca_add_vcpu(struct kvm_vcpu *vcpu)
1569a6e2f683SEugene (jno) Dvurechenski {
1570eaa78f34SDavid Hildenbrand 	read_lock(&vcpu->kvm->arch.sca_lock);
1571eaa78f34SDavid Hildenbrand 	if (vcpu->kvm->arch.use_esca) {
1572eaa78f34SDavid Hildenbrand 		struct esca_block *sca = vcpu->kvm->arch.sca;
15737d43bafcSEugene (jno) Dvurechenski 
1574eaa78f34SDavid Hildenbrand 		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
15757d43bafcSEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
15767d43bafcSEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
157725508824SDavid Hildenbrand 		vcpu->arch.sie_block->ecb2 |= 0x04U;
1578eaa78f34SDavid Hildenbrand 		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
15797d43bafcSEugene (jno) Dvurechenski 	} else {
1580eaa78f34SDavid Hildenbrand 		struct bsca_block *sca = vcpu->kvm->arch.sca;
1581a6e2f683SEugene (jno) Dvurechenski 
1582eaa78f34SDavid Hildenbrand 		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
1583a6e2f683SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
1584a6e2f683SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
1585eaa78f34SDavid Hildenbrand 		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
1586a6e2f683SEugene (jno) Dvurechenski 	}
1587eaa78f34SDavid Hildenbrand 	read_unlock(&vcpu->kvm->arch.sca_lock);
15885e044315SEugene (jno) Dvurechenski }
15895e044315SEugene (jno) Dvurechenski 
15905e044315SEugene (jno) Dvurechenski /* Basic SCA to Extended SCA data copy routines */
/* Copy one per-cpu SCA entry (SIE block address + SIGP control). */
15915e044315SEugene (jno) Dvurechenski static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
15925e044315SEugene (jno) Dvurechenski {
15935e044315SEugene (jno) Dvurechenski 	d->sda = s->sda;
15945e044315SEugene (jno) Dvurechenski 	d->sigp_ctrl.c = s->sigp_ctrl.c;
15955e044315SEugene (jno) Dvurechenski 	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
15965e044315SEugene (jno) Dvurechenski }
15975e044315SEugene (jno) Dvurechenski 
/*
 * sca_copy_b_to_e() - copy a basic SCA into an extended one: the ipte
 * control word, the single bsca mcn word into mcn[0], and every cpu
 * entry the basic layout can hold (KVM_S390_BSCA_CPU_SLOTS).
 */
15985e044315SEugene (jno) Dvurechenski static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
15995e044315SEugene (jno) Dvurechenski {
16005e044315SEugene (jno) Dvurechenski 	int i;
16015e044315SEugene (jno) Dvurechenski 
16025e044315SEugene (jno) Dvurechenski 	d->ipte_control = s->ipte_control;
16035e044315SEugene (jno) Dvurechenski 	d->mcn[0] = s->mcn;
16045e044315SEugene (jno) Dvurechenski 	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
16055e044315SEugene (jno) Dvurechenski 		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
16065e044315SEugene (jno) Dvurechenski }
16075e044315SEugene (jno) Dvurechenski 
/*
 * sca_switch_to_extended() - live-migrate the VM from a basic to an
 * extended SCA (needed to exceed KVM_S390_BSCA_CPU_SLOTS vcpus).
 *
 * Allocates the new esca, then blocks all vcpus and takes sca_lock for
 * writing so no vcpu runs or touches the SCA while the contents are
 * copied and every SIE block is repointed (scaoh/scaol) with the ESCA
 * mode bit (ecb2 |= 0x04) set. The old basic SCA page is freed last.
 *
 * Returns 0 on success, -ENOMEM if the esca allocation fails (the VM
 * then stays on the basic SCA).
 */
16085e044315SEugene (jno) Dvurechenski static int sca_switch_to_extended(struct kvm *kvm)
16095e044315SEugene (jno) Dvurechenski {
16105e044315SEugene (jno) Dvurechenski 	struct bsca_block *old_sca = kvm->arch.sca;
16115e044315SEugene (jno) Dvurechenski 	struct esca_block *new_sca;
16125e044315SEugene (jno) Dvurechenski 	struct kvm_vcpu *vcpu;
16135e044315SEugene (jno) Dvurechenski 	unsigned int vcpu_idx;
16145e044315SEugene (jno) Dvurechenski 	u32 scaol, scaoh;
16155e044315SEugene (jno) Dvurechenski 
16165e044315SEugene (jno) Dvurechenski 	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
16175e044315SEugene (jno) Dvurechenski 	if (!new_sca)
16185e044315SEugene (jno) Dvurechenski 		return -ENOMEM;
16195e044315SEugene (jno) Dvurechenski 
16205e044315SEugene (jno) Dvurechenski 	scaoh = (u32)((u64)(new_sca) >> 32);
16215e044315SEugene (jno) Dvurechenski 	scaol = (u32)(u64)(new_sca) & ~0x3fU;
16225e044315SEugene (jno) Dvurechenski 
16235e044315SEugene (jno) Dvurechenski 	kvm_s390_vcpu_block_all(kvm);
16245e044315SEugene (jno) Dvurechenski 	write_lock(&kvm->arch.sca_lock);
16255e044315SEugene (jno) Dvurechenski 
16265e044315SEugene (jno) Dvurechenski 	sca_copy_b_to_e(new_sca, old_sca);
16275e044315SEugene (jno) Dvurechenski 
16285e044315SEugene (jno) Dvurechenski 	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
16295e044315SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaoh = scaoh;
16305e044315SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaol = scaol;
16315e044315SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->ecb2 |= 0x04U;
16325e044315SEugene (jno) Dvurechenski 	}
16335e044315SEugene (jno) Dvurechenski 	kvm->arch.sca = new_sca;
16345e044315SEugene (jno) Dvurechenski 	kvm->arch.use_esca = 1;
16355e044315SEugene (jno) Dvurechenski 
16365e044315SEugene (jno) Dvurechenski 	write_unlock(&kvm->arch.sca_lock);
16375e044315SEugene (jno) Dvurechenski 	kvm_s390_vcpu_unblock_all(kvm);
16385e044315SEugene (jno) Dvurechenski 
16395e044315SEugene (jno) Dvurechenski 	free_page((unsigned long)old_sca);
16405e044315SEugene (jno) Dvurechenski 
16418335713aSChristian Borntraeger 	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
16428335713aSChristian Borntraeger 		 old_sca, kvm->arch.sca);
16435e044315SEugene (jno) Dvurechenski 	return 0;
16447d43bafcSEugene (jno) Dvurechenski }
1645a6e2f683SEugene (jno) Dvurechenski 
/*
 * sca_can_add_vcpu() - can a vcpu with this @id be added?
 *
 * Ids that fit the basic SCA are always accepted. Larger ids require
 * both the ESCA and 64-bit-SCAO sclp facilities; if available, the VM
 * is switched to the extended SCA (under kvm->lock, idempotent) and
 * the id is checked against the extended slot count.
 * Note: declared int but used as a boolean (returns true/false).
 */
1646a6e2f683SEugene (jno) Dvurechenski static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
1647a6e2f683SEugene (jno) Dvurechenski {
16485e044315SEugene (jno) Dvurechenski 	int rc;
16495e044315SEugene (jno) Dvurechenski 
16505e044315SEugene (jno) Dvurechenski 	if (id < KVM_S390_BSCA_CPU_SLOTS)
16515e044315SEugene (jno) Dvurechenski 		return true;
165276a6dd72SDavid Hildenbrand 	if (!sclp.has_esca || !sclp.has_64bscao)
16535e044315SEugene (jno) Dvurechenski 		return false;
16545e044315SEugene (jno) Dvurechenski 
16555e044315SEugene (jno) Dvurechenski 	mutex_lock(&kvm->lock);
16565e044315SEugene (jno) Dvurechenski 	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
16575e044315SEugene (jno) Dvurechenski 	mutex_unlock(&kvm->lock);
16585e044315SEugene (jno) Dvurechenski 
16595e044315SEugene (jno) Dvurechenski 	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
1660a6e2f683SEugene (jno) Dvurechenski }
1661a6e2f683SEugene (jno) Dvurechenski 
/*
 * kvm_arch_vcpu_init() - arch-level vcpu initialization: invalidate the
 * pfault token, clear the async-pf queue, and advertise which register
 * sets user space may sync via kvm_run (RICCB only with facility 64;
 * vector regs with MACHINE_HAS_VX, plain fp regs otherwise — fprs can
 * be synchronized via vrs, see the comment below). For ucontrol VMs,
 * additionally creates the per-vcpu gmap.
 * Returns 0 on success or the error from __kvm_ucontrol_vcpu_init().
 */
1662dafd032aSDominik Dingel int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1663dafd032aSDominik Dingel {
1664dafd032aSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1665dafd032aSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
166659674c1aSChristian Borntraeger 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
166759674c1aSChristian Borntraeger 				    KVM_SYNC_GPRS |
16689eed0735SChristian Borntraeger 				    KVM_SYNC_ACRS |
1669b028ee3eSDavid Hildenbrand 				    KVM_SYNC_CRS |
1670b028ee3eSDavid Hildenbrand 				    KVM_SYNC_ARCH0 |
1671b028ee3eSDavid Hildenbrand 				    KVM_SYNC_PFAULT;
1672c6e5f166SFan Zhang 	if (test_kvm_facility(vcpu->kvm, 64))
1673c6e5f166SFan Zhang 		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
1674f6aa6dc4SDavid Hildenbrand 	/* fprs can be synchronized via vrs, even if the guest has no vx. With
1675f6aa6dc4SDavid Hildenbrand 	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
1676f6aa6dc4SDavid Hildenbrand 	 */
1677f6aa6dc4SDavid Hildenbrand 	if (MACHINE_HAS_VX)
167868c55750SEric Farman 		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
16796fd8e67dSDavid Hildenbrand 	else
16806fd8e67dSDavid Hildenbrand 		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
1681dafd032aSDominik Dingel 
1682dafd032aSDominik Dingel 	if (kvm_is_ucontrol(vcpu->kvm))
1683dafd032aSDominik Dingel 		return __kvm_ucontrol_vcpu_init(vcpu);
1684dafd032aSDominik Dingel 
1685b0c632dbSHeiko Carstens 	return 0;
1686b0c632dbSHeiko Carstens }
1687b0c632dbSHeiko Carstens 
1688db0758b2SDavid Hildenbrand /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
/*
 * Record the TOD clock at which host-side cpu-timer accounting starts;
 * the seqcount write section lets kvm_s390_get_cpu_timer() read
 * cputm_start consistently from other threads.
 */
1689db0758b2SDavid Hildenbrand static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1690db0758b2SDavid Hildenbrand {
1691db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
16929c23a131SDavid Hildenbrand 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
1693db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_start = get_tod_clock_fast();
16949c23a131SDavid Hildenbrand 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
1695db0758b2SDavid Hildenbrand }
1696db0758b2SDavid Hildenbrand 
1697db0758b2SDavid Hildenbrand /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
/*
 * Charge the TOD time elapsed since cputm_start against the guest cpu
 * timer (which counts down) and mark accounting stopped by resetting
 * cputm_start to 0, all inside a seqcount write section.
 */
1698db0758b2SDavid Hildenbrand static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1699db0758b2SDavid Hildenbrand {
1700db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
17019c23a131SDavid Hildenbrand 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
1702db0758b2SDavid Hildenbrand 	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
1703db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_start = 0;
17049c23a131SDavid Hildenbrand 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
1705db0758b2SDavid Hildenbrand }
1706db0758b2SDavid Hildenbrand 
1707db0758b2SDavid Hildenbrand /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
/* Turn accounting on (must currently be off) and start the clock. */
1708db0758b2SDavid Hildenbrand static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1709db0758b2SDavid Hildenbrand {
1710db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
1711db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_enabled = true;
1712db0758b2SDavid Hildenbrand 	__start_cpu_timer_accounting(vcpu);
1713db0758b2SDavid Hildenbrand }
1714db0758b2SDavid Hildenbrand 
1715db0758b2SDavid Hildenbrand /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
/* Turn accounting off (must currently be on), charging elapsed time. */
1716db0758b2SDavid Hildenbrand static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1717db0758b2SDavid Hildenbrand {
1718db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
1719db0758b2SDavid Hildenbrand 	__stop_cpu_timer_accounting(vcpu);
1720db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_enabled = false;
1721db0758b2SDavid Hildenbrand }
1722db0758b2SDavid Hildenbrand 
/* Preemption-safe wrapper around __enable_cpu_timer_accounting(). */
1723db0758b2SDavid Hildenbrand static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1724db0758b2SDavid Hildenbrand {
1725db0758b2SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
1726db0758b2SDavid Hildenbrand 	__enable_cpu_timer_accounting(vcpu);
1727db0758b2SDavid Hildenbrand 	preempt_enable();
1728db0758b2SDavid Hildenbrand }
1729db0758b2SDavid Hildenbrand 
/* Preemption-safe wrapper around __disable_cpu_timer_accounting(). */
1730db0758b2SDavid Hildenbrand static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1731db0758b2SDavid Hildenbrand {
1732db0758b2SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
1733db0758b2SDavid Hildenbrand 	__disable_cpu_timer_accounting(vcpu);
1734db0758b2SDavid Hildenbrand 	preempt_enable();
1735db0758b2SDavid Hildenbrand }
1736db0758b2SDavid Hildenbrand 
17374287f247SDavid Hildenbrand /* set the cpu timer - may only be called from the VCPU thread itself */
/*
 * Store a new guest cpu-timer value. If accounting is enabled the
 * start timestamp is refreshed in the same seqcount write section, so
 * already-elapsed time is not charged against the new value and
 * concurrent readers (kvm_s390_get_cpu_timer()) see a consistent pair.
 */
17384287f247SDavid Hildenbrand void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
17394287f247SDavid Hildenbrand {
1740db0758b2SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
17419c23a131SDavid Hildenbrand 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
1742db0758b2SDavid Hildenbrand 	if (vcpu->arch.cputm_enabled)
1743db0758b2SDavid Hildenbrand 		vcpu->arch.cputm_start = get_tod_clock_fast();
17444287f247SDavid Hildenbrand 	vcpu->arch.sie_block->cputm = cputm;
17459c23a131SDavid Hildenbrand 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
1746db0758b2SDavid Hildenbrand 	preempt_enable();
17474287f247SDavid Hildenbrand }
17484287f247SDavid Hildenbrand 
/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	/* fast path: no accounting in flight, the sie block value is final */
	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
		/* mask out the odd "write in progress" bit before the retry check */
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}
17744287f247SDavid Hildenbrand 
/* prepare this physical cpu to run the vcpu: swap in guest fp/acrs/gmap state */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* Save host register state */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;

	/* point the lazy fpu state at the guest registers (vector or fp) */
	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;

	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.enabled_gmap);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	/* resume CPU timer accounting unless the vcpu is idle */
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}
1799b0c632dbSHeiko Carstens 
/* undo kvm_arch_vcpu_load: stash guest state and restore the host's */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	/* stop CPU timer accounting unless the vcpu was idle anyway */
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	/* remember which gmap was active so the next vcpu_load re-enables it */
	vcpu->arch.enabled_gmap = gmap_get_enabled();
	gmap_disable(vcpu->arch.enabled_gmap);

	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;

	/* Restore host register state */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;

	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
}
1820b0c632dbSHeiko Carstens 
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* CR0/CR14 get their initial-reset values (per the pop, see above) */
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	/* forget any pfault token and pending async page faults */
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	/* only stop the cpu if userspace does not control the cpu states */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}
1844b0c632dbSHeiko Carstens 
/* finish vcpu setup that needs the kvm lock / other vcpus to exist */
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	/* take over the VM-wide TOD epoch; preemption disabled to protect
	 * against a concurrent TOD sync (see the cputm helpers above) */
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		/* non-ucontrol guests share the VM-wide gmap and get an SCA slot */
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
	/* intercept operation exceptions if facility 74 is there or if
	 * userspace asked to handle instruction 0 itself */
	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
	/* make vcpu_load load the right gmap on the first trigger */
	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
}
186142897d86SMarcelo Tosatti 
/* mirror the VM-wide crypto configuration into this vcpu's SIE block */
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	/* nothing to do if the crypto facility (76) is not available */
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	/* start from a clean slate, then enable what the VM configured */
	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	/* point the SIE block at the VM's crypto control block */
	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}
18765102ee87STony Krowiak 
/* free the CMMA buffer list page allocated by kvm_s390_vcpu_setup_cmma() */
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}
1882b31605c1SDominik Dingel 
/* allocate the CMMA buffer list and enable CMMA in the SIE block.
 * Returns 0 on success, -ENOMEM if the page allocation fails. */
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	/* enable CMMA (0x80); clear 0x08, which kvm_arch_vcpu_setup sets for
	 * PFMF interpretation - presumably incompatible with CMMA, confirm */
	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}
1893b31605c1SDominik Dingel 
/* apply the VM-wide cpu model (ibc, facility list) to this vcpu */
static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.sie_block->ibc = model->ibc;
	/* only expose the facility list if facility 7 (stfle) is available */
	if (test_kvm_facility(vcpu->kvm, 7))
		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
}
190291520f1aSMichael Mueller 
/*
 * Initialize the SIE control block for a freshly created vcpu:
 * cpu flags, cpu model, facility-dependent execution controls,
 * CMMA, the clock comparator timer and the crypto setup.
 * Returns 0 on success or a negative error code (e.g. from CMMA setup).
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	/* vcpus start in z/Architecture mode, stopped */
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	/* prefer the enhanced GED variant if facility 78 is available */
	if (test_kvm_facility(vcpu->kvm, 78))
		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
	else if (test_kvm_facility(vcpu->kvm, 8))
		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

	kvm_s390_vcpu_setup_model(vcpu);

	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
	if (MACHINE_HAS_ESOP)
		vcpu->arch.sie_block->ecb |= 0x02;
	/* facility-gated ecb bits; exact bit semantics per the SIE block
	 * layout - NOTE(review): 0x04 gated by facility 9, 0x10 by 73 */
	if (test_kvm_facility(vcpu->kvm, 9))
		vcpu->arch.sie_block->ecb |= 0x04;
	if (test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	/* PFMF interpretation needs facility 8 and sclp support */
	if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
		vcpu->arch.sie_block->ecb2 |= 0x08;
	/* base eca value plus sclp-dependent interpretation facilities */
	vcpu->arch.sie_block->eca = 0x1002000U;
	if (sclp.has_cei)
		vcpu->arch.sie_block->eca |= 0x80000000U;
	if (sclp.has_ib)
		vcpu->arch.sie_block->eca |= 0x40000000U;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= 1;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= 0x10000000U;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->arch.sie_block->ecb3 |= 0x01;
	/* facility 129: vector support */
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= 0x00020000;
		vcpu->arch.sie_block->ecd |= 0x20000000;
	}
	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
	/* always intercept the storage key instructions */
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	/* the clock comparator is emulated via a hrtimer based wakeup */
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}
1958b0c632dbSHeiko Carstens 
/*
 * Allocate and minimally initialize a vcpu and its SIE control block.
 * Returns the vcpu on success or an ERR_PTR (-EINVAL for an invalid id,
 * -ENOMEM on allocation failure, or the kvm_vcpu_init() error).
 */
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	/* for non-ucontrol guests the id must fit into the SCA */
	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	/* the sie block and the itdb share one zeroed page */
	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	/* the real guest size will always be smaller than msl */
	vcpu->arch.sie_block->mso = 0;
	vcpu->arch.sie_block->msl = sclp.hamax;

	vcpu->arch.sie_block->icpua = id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	/* protects cputm against concurrent readers, see the helpers above */
	seqcount_init(&vcpu->arch.cputm_seqcount);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}
2008b0c632dbSHeiko Carstens 
/* a vcpu is runnable when kvm_s390_vcpu_has_irq reports a pending irq */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}
2013b0c632dbSHeiko Carstens 
/* keep the vcpu from (re)entering SIE until kvm_s390_vcpu_unblock() */
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	/* kick the vcpu out of SIE so the block bit takes effect now */
	exit_sie(vcpu);
}
201949b99e1eSChristian Borntraeger 
/* allow the vcpu to enter SIE again, undoing kvm_s390_vcpu_block() */
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
202449b99e1eSChristian Borntraeger 
/* flag a pending request in the SIE block and force the vcpu out of SIE */
static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}
20308e236546SChristian Borntraeger 
/* clear the request flag once the vcpu has processed its requests */
static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}
20358e236546SChristian Borntraeger 
/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	/* request a stop intercept, then busy-wait until SIE has been left */
	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}
204649b99e1eSChristian Borntraeger 
/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}
205349b99e1eSChristian Borntraeger 
/*
 * gmap invalidation notifier: if the invalidated range overlaps a vcpu's
 * prefix pages, request an MMU reload so the prefix gets remapped.
 */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;
	unsigned long prefix;
	int i;

	/* shadow gmaps are handled elsewhere, not via prefix reloads */
	if (gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		prefix = kvm_s390_get_prefix(vcpu);
		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
				   start, end);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}
20772c70fe44SChristian Borntraeger 
/* s390 kicks vcpus via exit_sie(), so this generic hook must never run */
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
2084b6d33834SChristoffer Dall 
/*
 * KVM_GET_ONE_REG: copy a single architecture register to userspace.
 * Returns 0 on success, -EFAULT via put_user on a bad address, and
 * -EINVAL for unknown register ids.
 */
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		/* use the accessor so in-flight accounting is included */
		r = put_user(kvm_s390_get_cpu_timer(vcpu),
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
213314eebd91SCarsten Otte 
/*
 * KVM_SET_ONE_REG: set a single architecture register from userspace.
 * Returns 0 on success, -EFAULT via get_user on a bad address, and
 * -EINVAL for unknown register ids.
 */
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;
	__u64 val;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		/* go through the setter to keep timer accounting consistent */
		r = get_user(val, (u64 __user *)reg->addr);
		if (!r)
			kvm_s390_set_cpu_timer(vcpu, val);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		/* an invalid token cancels all outstanding async page faults */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
2186b6d33834SChristoffer Dall 
/* KVM_S390_INITIAL_RESET ioctl: perform an initial cpu reset */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
2192b0c632dbSHeiko Carstens 
/* KVM_SET_REGS: copy the general purpose registers from userspace */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}
2198b0c632dbSHeiko Carstens 
/* KVM_GET_REGS: copy the general purpose registers to userspace */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
2204b0c632dbSHeiko Carstens 
/* KVM_SET_SREGS: set access and control registers from userspace */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	/* load the new acrs right away, we might be running on this cpu */
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}
2213b0c632dbSHeiko Carstens 
/* KVM_GET_SREGS: copy access and control registers to userspace */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
2221b0c632dbSHeiko Carstens 
/*
 * KVM_SET_FPU: set the guest floating point registers and FPC.
 * Returns -EINVAL if userspace passed an invalid FPC, 0 otherwise.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* make sure the new values will be lazily loaded */
	save_fpu_regs();
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	current->thread.fpu.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		/* with vector support the fp regs live inside the vector regs */
		convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
	else
		memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
	return 0;
}
2235b0c632dbSHeiko Carstens 
/* KVM_GET_FPU: copy the guest floating point registers and FPC to userspace */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* make sure we have the latest values */
	save_fpu_regs();
	if (MACHINE_HAS_VX)
		/* with vector support the fp regs live inside the vector regs */
		convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
	else
		memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
	fpu->fpc = current->thread.fpu.fpc;
	return 0;
}
2247b0c632dbSHeiko Carstens 
2248b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
2249b0c632dbSHeiko Carstens {
2250b0c632dbSHeiko Carstens 	int rc = 0;
2251b0c632dbSHeiko Carstens 
22527a42fdc2SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
2253b0c632dbSHeiko Carstens 		rc = -EBUSY;
2254d7b0b5ebSCarsten Otte 	else {
2255d7b0b5ebSCarsten Otte 		vcpu->run->psw_mask = psw.mask;
2256d7b0b5ebSCarsten Otte 		vcpu->run->psw_addr = psw.addr;
2257d7b0b5ebSCarsten Otte 	}
2258b0c632dbSHeiko Carstens 	return rc;
2259b0c632dbSHeiko Carstens }
2260b0c632dbSHeiko Carstens 
/* KVM_TRANSLATE is not supported on s390 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
2266b0c632dbSHeiko Carstens 
/* debug flags userspace may pass via KVM_SET_GUEST_DEBUG */
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

/*
 * KVM_SET_GUEST_DEBUG: enable/disable guest debugging (PER based).
 * Returns -EINVAL for unsupported flags or missing guest-PER support,
 * otherwise the result of importing the breakpoint data (0 on success).
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	/* always start from a clean state; re-enable below if requested */
	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;
	/* guest debugging needs the guest-PER enhancement facility */
	if (!sclp.has_gpere)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	/* importing breakpoint data failed - roll back to debugging off */
	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
2304b0c632dbSHeiko Carstens 
230562d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
230662d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
230762d9f0dbSMarcelo Tosatti {
23086352e4d2SDavid Hildenbrand 	/* CHECK_STOP and LOAD are not supported yet */
23096352e4d2SDavid Hildenbrand 	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
23106352e4d2SDavid Hildenbrand 				       KVM_MP_STATE_OPERATING;
231162d9f0dbSMarcelo Tosatti }
231262d9f0dbSMarcelo Tosatti 
231362d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
231462d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
231562d9f0dbSMarcelo Tosatti {
23166352e4d2SDavid Hildenbrand 	int rc = 0;
23176352e4d2SDavid Hildenbrand 
23186352e4d2SDavid Hildenbrand 	/* user space knows about this interface - let it control the state */
23196352e4d2SDavid Hildenbrand 	vcpu->kvm->arch.user_cpu_state_ctrl = 1;
23206352e4d2SDavid Hildenbrand 
23216352e4d2SDavid Hildenbrand 	switch (mp_state->mp_state) {
23226352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_STOPPED:
23236352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
23246352e4d2SDavid Hildenbrand 		break;
23256352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_OPERATING:
23266352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
23276352e4d2SDavid Hildenbrand 		break;
23286352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_LOAD:
23296352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_CHECK_STOP:
23306352e4d2SDavid Hildenbrand 		/* fall through - CHECK_STOP and LOAD are not supported yet */
23316352e4d2SDavid Hildenbrand 	default:
23326352e4d2SDavid Hildenbrand 		rc = -ENXIO;
23336352e4d2SDavid Hildenbrand 	}
23346352e4d2SDavid Hildenbrand 
23356352e4d2SDavid Hildenbrand 	return rc;
233662d9f0dbSMarcelo Tosatti }
233762d9f0dbSMarcelo Tosatti 
23388ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu)
23398ad35755SDavid Hildenbrand {
23408ad35755SDavid Hildenbrand 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
23418ad35755SDavid Hildenbrand }
23428ad35755SDavid Hildenbrand 
/*
 * Process all outstanding vcpu requests before (re)entering SIE.
 * Every handled request restarts the scan ("goto retry") because handling
 * one request can race with another one being set concurrently.
 * Returns 0 on success, a negative error code otherwise.
 */
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	/* fast path: no request bits set at all */
	if (!vcpu->requests)
		return 0;
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_mprotect_notify(vcpu->arch.gmap,
					  kvm_s390_get_prefix(vcpu),
					  PAGE_SIZE * 2, PROT_WRITE);
		if (rc)
			return rc;
		goto retry;
	}

	/* perform a requested TLB flush for this vcpu */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	/* turn on IBS if requested and not already active */
	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_or(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* turn off IBS if requested and currently active */
	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_andnot(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* start intercepting operation exceptions (ICTL_OPEREXC) */
	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}
23992c70fe44SChristian Borntraeger 
/*
 * Set the guest TOD clock for the whole VM.
 *
 * The epoch is the delta added to the host TOD clock to yield the guest
 * TOD value, hence "tod - get_tod_clock()".  All vcpus are blocked while
 * the per-vcpu epochs are updated so no vcpu runs with a stale value.
 */
void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
{
	struct kvm_vcpu *vcpu;
	int i;

	mutex_lock(&kvm->lock);
	/* compute the epoch once so all vcpus get a consistent value */
	preempt_disable();
	kvm->arch.epoch = tod - get_tod_clock();
	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
	kvm_s390_vcpu_unblock_all(kvm);
	preempt_enable();
	mutex_unlock(&kvm->lock);
}
241525ed1675SDavid Hildenbrand 
2416fa576c58SThomas Huth /**
2417fa576c58SThomas Huth  * kvm_arch_fault_in_page - fault-in guest page if necessary
2418fa576c58SThomas Huth  * @vcpu: The corresponding virtual cpu
2419fa576c58SThomas Huth  * @gpa: Guest physical address
2420fa576c58SThomas Huth  * @writable: Whether the page should be writable or not
2421fa576c58SThomas Huth  *
2422fa576c58SThomas Huth  * Make sure that a guest page has been faulted-in on the host.
2423fa576c58SThomas Huth  *
2424fa576c58SThomas Huth  * Return: Zero on success, negative error code otherwise.
2425fa576c58SThomas Huth  */
2426fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
242724eb3a82SDominik Dingel {
2428527e30b4SMartin Schwidefsky 	return gmap_fault(vcpu->arch.gmap, gpa,
2429527e30b4SMartin Schwidefsky 			  writable ? FAULT_FLAG_WRITE : 0);
243024eb3a82SDominik Dingel }
243124eb3a82SDominik Dingel 
24323c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
24333c038e6bSDominik Dingel 				      unsigned long token)
24343c038e6bSDominik Dingel {
24353c038e6bSDominik Dingel 	struct kvm_s390_interrupt inti;
2436383d0b05SJens Freimann 	struct kvm_s390_irq irq;
24373c038e6bSDominik Dingel 
24383c038e6bSDominik Dingel 	if (start_token) {
2439383d0b05SJens Freimann 		irq.u.ext.ext_params2 = token;
2440383d0b05SJens Freimann 		irq.type = KVM_S390_INT_PFAULT_INIT;
2441383d0b05SJens Freimann 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
24423c038e6bSDominik Dingel 	} else {
24433c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_DONE;
2444383d0b05SJens Freimann 		inti.parm64 = token;
24453c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
24463c038e6bSDominik Dingel 	}
24473c038e6bSDominik Dingel }
24483c038e6bSDominik Dingel 
24493c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
24503c038e6bSDominik Dingel 				     struct kvm_async_pf *work)
24513c038e6bSDominik Dingel {
24523c038e6bSDominik Dingel 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
24533c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
24543c038e6bSDominik Dingel }
24553c038e6bSDominik Dingel 
24563c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
24573c038e6bSDominik Dingel 				 struct kvm_async_pf *work)
24583c038e6bSDominik Dingel {
24593c038e6bSDominik Dingel 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
24603c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
24613c038e6bSDominik Dingel }
24623c038e6bSDominik Dingel 
/* Intentionally empty arch hook. */
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}
24683c038e6bSDominik Dingel 
24693c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
24703c038e6bSDominik Dingel {
24713c038e6bSDominik Dingel 	/*
24723c038e6bSDominik Dingel 	 * s390 will always inject the page directly,
24733c038e6bSDominik Dingel 	 * but we still want check_async_completion to cleanup
24743c038e6bSDominik Dingel 	 */
24753c038e6bSDominik Dingel 	return true;
24763c038e6bSDominik Dingel }
24773c038e6bSDominik Dingel 
/*
 * Try to set up an async pfault for the address that just faulted
 * (current->thread.gmap_addr).  All preconditions must hold: a valid
 * pfault token, the PSW mask matching the pfault select/compare values,
 * external interrupts enabled, no interrupt already pending, the
 * required CR0 bit (0x200) set and pfault enabled on the gmap.
 *
 * Returns 0 if no async pfault was set up, otherwise the return value
 * of kvm_setup_async_pf().
 */
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	/* NOTE(review): 0x200 in CR0 presumably gates pfault delivery - confirm */
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	/* translate the faulting guest address to a host virtual address */
	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	/* the token itself is read from guest real storage */
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
25063c038e6bSDominik Dingel 
/*
 * Prepare for entering SIE: handle completed pfaults, deliver pending
 * machine checks and interrupts, process vcpu requests and arm guest
 * debug PER state.  Returns 0 if the vcpu may enter SIE, otherwise an
 * error code from interrupt delivery or request handling.
 */
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	/* gprs 14/15 are shadowed into the SIE block for the guest */
	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];

	if (need_resched())
		schedule();

	/* handle a pending host machine check before entering the guest */
	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	/* ucontrol VMs deliver their interrupts from user space */
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	/* clear the intercept code before (re)entering SIE */
	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
25493fb4c40fSThomas Huth 
/*
 * A DAT access exception occurred while executing SIE itself: turn it
 * into an addressing exception for the guest.
 */
static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_ADDRESSING,
	};
	u8 opcode, ilen;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest_instr(vcpu, &opcode, 1);
	ilen = insn_length(opcode);
	if (rc < 0) {
		/* hard error while reading the opcode - report it */
		return rc;
	} else if (rc) {
		/* Instruction-Fetching Exceptions - we can't detect the ilen.
		 * Forward by arbitrary ilc, injection will take care of
		 * nullification if necessary.
		 */
		pgm_info = vcpu->arch.pgm;
		ilen = 4;
	}
	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
	kvm_s390_forward_psw(vcpu, ilen);
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}
2585492d8642SThomas Huth 
/*
 * Post-process a SIE exit.  Returns 0 when the run loop should continue,
 * -EREMOTE when user space must handle the exit (kvm_run was prepared),
 * or another rc from intercept handling / fault resolution.
 */
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	/* copy the shadowed gprs 14/15 back to the run structure */
	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

	if (vcpu->arch.sie_block->icptcode > 0) {
		/* a real intercept: try in-kernel handling first */
		int rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		/* not handled in kernel - hand the intercept to user space */
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	} else if (exit_reason != -EFAULT) {
		/* no intercept and no fault: nothing to do */
		vcpu->stat.exit_null++;
		return 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		/* ucontrol VMs resolve faults in user space */
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		return -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		/* guest page fault: try async pfault, else fault it in now */
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
	}
	return vcpu_post_run_fault_in_sie(vcpu);
}
26263fb4c40fSThomas Huth 
/*
 * The main run loop: prepare, enter SIE, post-process - until a signal,
 * a guest debug exit or a non-zero rc ends the loop.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		/* drop srcu while the guest runs */
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		local_irq_disable();
		guest_enter_irqoff();
		__disable_cpu_timer_accounting(vcpu);
		local_irq_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		local_irq_disable();
		__enable_cpu_timer_accounting(vcpu);
		guest_exit_irqoff();
		local_irq_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
2665b0c632dbSHeiko Carstens 
/* Transfer dirty register state from the kvm_run area into the vcpu. */
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	/* the PSW is always synced, independent of the dirty bits */
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		/* an invalid token cancels outstanding pfault completions */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	/* all dirty state has been consumed */
	kvm_run->kvm_dirty_regs = 0;
}
2693b028ee3eSDavid Hildenbrand 
/* Copy the vcpu's register state back into the kvm_run area. */
static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}
2709b028ee3eSDavid Hildenbrand 
2710b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2711b0c632dbSHeiko Carstens {
27128f2abe6aSChristian Borntraeger 	int rc;
2713b0c632dbSHeiko Carstens 	sigset_t sigsaved;
2714b0c632dbSHeiko Carstens 
271527291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu)) {
271627291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
271727291e21SDavid Hildenbrand 		return 0;
271827291e21SDavid Hildenbrand 	}
271927291e21SDavid Hildenbrand 
2720b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
2721b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2722b0c632dbSHeiko Carstens 
27236352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
27246852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
27256352e4d2SDavid Hildenbrand 	} else if (is_vcpu_stopped(vcpu)) {
2726ea2cdd27SDavid Hildenbrand 		pr_err_ratelimited("can't run stopped vcpu %d\n",
27276352e4d2SDavid Hildenbrand 				   vcpu->vcpu_id);
27286352e4d2SDavid Hildenbrand 		return -EINVAL;
27296352e4d2SDavid Hildenbrand 	}
2730b0c632dbSHeiko Carstens 
2731b028ee3eSDavid Hildenbrand 	sync_regs(vcpu, kvm_run);
2732db0758b2SDavid Hildenbrand 	enable_cpu_timer_accounting(vcpu);
2733d7b0b5ebSCarsten Otte 
2734dab4079dSHeiko Carstens 	might_fault();
2735e168bf8dSCarsten Otte 	rc = __vcpu_run(vcpu);
27369ace903dSChristian Ehrhardt 
2737b1d16c49SChristian Ehrhardt 	if (signal_pending(current) && !rc) {
2738b1d16c49SChristian Ehrhardt 		kvm_run->exit_reason = KVM_EXIT_INTR;
27398f2abe6aSChristian Borntraeger 		rc = -EINTR;
2740b1d16c49SChristian Ehrhardt 	}
27418f2abe6aSChristian Borntraeger 
274227291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu) && !rc)  {
274327291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
274427291e21SDavid Hildenbrand 		rc = 0;
274527291e21SDavid Hildenbrand 	}
274627291e21SDavid Hildenbrand 
27478f2abe6aSChristian Borntraeger 	if (rc == -EREMOTE) {
274871f116bfSDavid Hildenbrand 		/* userspace support is needed, kvm_run has been prepared */
27498f2abe6aSChristian Borntraeger 		rc = 0;
27508f2abe6aSChristian Borntraeger 	}
27518f2abe6aSChristian Borntraeger 
2752db0758b2SDavid Hildenbrand 	disable_cpu_timer_accounting(vcpu);
2753b028ee3eSDavid Hildenbrand 	store_regs(vcpu, kvm_run);
2754d7b0b5ebSCarsten Otte 
2755b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
2756b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2757b0c632dbSHeiko Carstens 
2758b0c632dbSHeiko Carstens 	vcpu->stat.exit_userspace++;
27597e8e6ab4SHeiko Carstens 	return rc;
2760b0c632dbSHeiko Carstens }
2761b0c632dbSHeiko Carstens 
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 * Returns 0 on success, -EFAULT if any guest write fails.
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	freg_t fprs[NUM_FPRS];
	unsigned int px;
	u64 clkcomp, cputm;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		/* store the architecture mode byte at absolute 163 */
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		/* store the architecture mode byte at real 163 */
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else
		gpa -= __LC_FPREGS_SAVE_AREA;

	/* manually convert vector registers if necessary */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	cputm = kvm_s390_get_cpu_timer(vcpu);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &cputm, 8);
	/* the clock comparator is stored shifted right by 8 bits */
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
2819b0c632dbSHeiko Carstens 
/*
 * Store the status of a loaded (running) vcpu: the lazily switched
 * FP/AC registers must be pulled back into our copies first.
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Lets update our copies before we save
	 * it into the save area
	 */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
2833e879892cSThomas Huth 
2834bc17de7cSEric Farman /*
2835bc17de7cSEric Farman  * store additional status at address
2836bc17de7cSEric Farman  */
2837bc17de7cSEric Farman int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2838bc17de7cSEric Farman 					unsigned long gpa)
2839bc17de7cSEric Farman {
2840bc17de7cSEric Farman 	/* Only bits 0-53 are used for address formation */
2841bc17de7cSEric Farman 	if (!(gpa & ~0x3ff))
2842bc17de7cSEric Farman 		return 0;
2843bc17de7cSEric Farman 
2844bc17de7cSEric Farman 	return write_guest_abs(vcpu, gpa & ~0x3ff,
2845bc17de7cSEric Farman 			       (void *)&vcpu->run->s.regs.vrs, 512);
2846bc17de7cSEric Farman }
2847bc17de7cSEric Farman 
/*
 * Store additional (vector) status for a loaded vcpu; a nop when the
 * vector facility (stfle bit 129) is not available to the guest.
 */
int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!test_kvm_facility(vcpu->kvm, 129))
		return 0;

	/*
	 * The guest VXRS are in the host VXRs due to the lazy
	 * copying in vcpu load/put. We can simply call save_fpu_regs()
	 * to save the current register state because we are in the
	 * middle of a load/put cycle.
	 *
	 * Let's update our copies before we save it into the save area.
	 */
	save_fpu_regs();

	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
}
2865bc17de7cSEric Farman 
/*
 * Cancel any pending ENABLE_IBS request and synchronously request
 * DISABLE_IBS for @vcpu (the sync request also kicks the vcpu).
 */
28668ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
28678ad35755SDavid Hildenbrand {
28688ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
28698e236546SChristian Borntraeger 	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
28708ad35755SDavid Hildenbrand }
28718ad35755SDavid Hildenbrand 
/* Disable IBS on every online vcpu of @kvm. */
28728ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
28738ad35755SDavid Hildenbrand {
28748ad35755SDavid Hildenbrand 	unsigned int i;
28758ad35755SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
28768ad35755SDavid Hildenbrand 
28778ad35755SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
28788ad35755SDavid Hildenbrand 		__disable_ibs_on_vcpu(vcpu);
28798ad35755SDavid Hildenbrand 	}
28808ad35755SDavid Hildenbrand }
28818ad35755SDavid Hildenbrand 
/*
 * Cancel any pending DISABLE_IBS request and synchronously request
 * ENABLE_IBS for @vcpu.  No-op if SCLP reports the IBS facility as
 * unavailable on this machine.
 */
28818ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
28828ad35755SDavid Hildenbrand {
288309a400e7SDavid Hildenbrand 	if (!sclp.has_ibs)
288409a400e7SDavid Hildenbrand 		return;
28858ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
28868e236546SChristian Borntraeger 	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
28878ad35755SDavid Hildenbrand }
28898ad35755SDavid Hildenbrand 
/*
 * Move @vcpu out of the STOPPED state.
 *
 * If this vcpu becomes the only started one, IBS is enabled for it to
 * speed it up; if it is the second one to start, IBS is disabled on all
 * vcpus again.  Clears CPUSTAT_STOPPED in the SIE control block and
 * requests a TLB flush, as another vcpu may have used IBS while this
 * one was offline.  No-op if the vcpu is not currently stopped.
 */
28906852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
28916852d7b6SDavid Hildenbrand {
28928ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
28938ad35755SDavid Hildenbrand 
28948ad35755SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
28958ad35755SDavid Hildenbrand 		return;
28968ad35755SDavid Hildenbrand 
28976852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
28988ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
2899433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
29008ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
29018ad35755SDavid Hildenbrand 
	/* Count how many vcpus are already running. */
29028ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
29038ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
29048ad35755SDavid Hildenbrand 			started_vcpus++;
29058ad35755SDavid Hildenbrand 	}
29068ad35755SDavid Hildenbrand 
29078ad35755SDavid Hildenbrand 	if (started_vcpus == 0) {
29088ad35755SDavid Hildenbrand 		/* we're the only active VCPU -> speed it up */
29098ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(vcpu);
29108ad35755SDavid Hildenbrand 	} else if (started_vcpus == 1) {
29118ad35755SDavid Hildenbrand 		/*
29128ad35755SDavid Hildenbrand 		 * As we are starting a second VCPU, we have to disable
29138ad35755SDavid Hildenbrand 		 * the IBS facility on all VCPUs to remove potentially
29148ad35755SDavid Hildenbrand 		 * outstanding ENABLE requests.
29158ad35755SDavid Hildenbrand 		 */
29168ad35755SDavid Hildenbrand 		__disable_ibs_on_all_vcpus(vcpu->kvm);
29178ad35755SDavid Hildenbrand 	}
29188ad35755SDavid Hildenbrand 
2919805de8f4SPeter Zijlstra 	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
29208ad35755SDavid Hildenbrand 	/*
29218ad35755SDavid Hildenbrand 	 * Another VCPU might have used IBS while we were offline.
29228ad35755SDavid Hildenbrand 	 * Let's play safe and flush the VCPU at startup.
29238ad35755SDavid Hildenbrand 	 */
2924d3d692c8SDavid Hildenbrand 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2925433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
29268ad35755SDavid Hildenbrand 	return;
29276852d7b6SDavid Hildenbrand }
29286852d7b6SDavid Hildenbrand 
/*
 * Move @vcpu into the STOPPED state.
 *
 * Clears any pending SIGP STOP irq, sets CPUSTAT_STOPPED, disables IBS
 * on this vcpu, and, if exactly one started vcpu remains afterwards,
 * enables IBS on that remaining vcpu to speed it up.  No-op if the
 * vcpu is already stopped.
 */
29296852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
29306852d7b6SDavid Hildenbrand {
29318ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
29328ad35755SDavid Hildenbrand 	struct kvm_vcpu *started_vcpu = NULL;
29338ad35755SDavid Hildenbrand 
29348ad35755SDavid Hildenbrand 	if (is_vcpu_stopped(vcpu))
29358ad35755SDavid Hildenbrand 		return;
29368ad35755SDavid Hildenbrand 
29376852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
29388ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
2939433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
29408ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
29418ad35755SDavid Hildenbrand 
294232f5ff63SDavid Hildenbrand 	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
29436cddd432SDavid Hildenbrand 	kvm_s390_clear_stop_irq(vcpu);
294432f5ff63SDavid Hildenbrand 
2945805de8f4SPeter Zijlstra 	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
29468ad35755SDavid Hildenbrand 	__disable_ibs_on_vcpu(vcpu);
29478ad35755SDavid Hildenbrand 
	/* Find out how many vcpus are still running, and which one. */
29488ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
29498ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
29508ad35755SDavid Hildenbrand 			started_vcpus++;
29518ad35755SDavid Hildenbrand 			started_vcpu = vcpu->kvm->vcpus[i];
29528ad35755SDavid Hildenbrand 		}
29538ad35755SDavid Hildenbrand 	}
29548ad35755SDavid Hildenbrand 
29558ad35755SDavid Hildenbrand 	if (started_vcpus == 1) {
29568ad35755SDavid Hildenbrand 		/*
29578ad35755SDavid Hildenbrand 		 * As we only have one VCPU left, we want to enable the
29588ad35755SDavid Hildenbrand 		 * IBS facility for that VCPU to speed it up.
29598ad35755SDavid Hildenbrand 		 */
29608ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(started_vcpu);
29618ad35755SDavid Hildenbrand 	}
29628ad35755SDavid Hildenbrand 
2963433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
29648ad35755SDavid Hildenbrand 	return;
29656852d7b6SDavid Hildenbrand }
29666852d7b6SDavid Hildenbrand 
/*
 * Handle KVM_ENABLE_CAP for a vcpu.
 *
 * Currently only KVM_CAP_S390_CSS_SUPPORT is supported; enabling it
 * sets the VM-wide css_support flag (despite being a vcpu ioctl).
 * Returns 0 on success, -EINVAL for unknown caps or non-zero flags.
 */
2967d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2968d6712df9SCornelia Huck 				     struct kvm_enable_cap *cap)
2969d6712df9SCornelia Huck {
2970d6712df9SCornelia Huck 	int r;
2971d6712df9SCornelia Huck 
	/* No flags are defined for this ioctl yet. */
2972d6712df9SCornelia Huck 	if (cap->flags)
2973d6712df9SCornelia Huck 		return -EINVAL;
2974d6712df9SCornelia Huck 
2975d6712df9SCornelia Huck 	switch (cap->cap) {
2976fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
2977fa6b7fe9SCornelia Huck 		if (!vcpu->kvm->arch.css_support) {
2978fa6b7fe9SCornelia Huck 			vcpu->kvm->arch.css_support = 1;
2979c92ea7b9SChristian Borntraeger 			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
2980fa6b7fe9SCornelia Huck 			trace_kvm_s390_enable_css(vcpu->kvm);
2981fa6b7fe9SCornelia Huck 		}
2982fa6b7fe9SCornelia Huck 		r = 0;
2983fa6b7fe9SCornelia Huck 		break;
2984d6712df9SCornelia Huck 	default:
2985d6712df9SCornelia Huck 		r = -EINVAL;
2986d6712df9SCornelia Huck 		break;
2987d6712df9SCornelia Huck 	}
2988d6712df9SCornelia Huck 	return r;
2989d6712df9SCornelia Huck }
2990d6712df9SCornelia Huck 
/*
 * Handle KVM_S390_MEM_OP: read from or write to guest logical memory
 * through a user-supplied buffer, or only check accessibility when
 * KVM_S390_MEMOP_F_CHECK_ONLY is set.
 *
 * Returns 0 on success, a negative errno for invalid arguments or
 * copy failures, or a positive value (a program interruption code from
 * the guest access functions); in the latter case a program irq is
 * injected into the vcpu if KVM_S390_MEMOP_F_INJECT_EXCEPTION is set.
 */
299141408c28SThomas Huth static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
299241408c28SThomas Huth 				  struct kvm_s390_mem_op *mop)
299341408c28SThomas Huth {
299441408c28SThomas Huth 	void __user *uaddr = (void __user *)mop->buf;
299541408c28SThomas Huth 	void *tmpbuf = NULL;
299641408c28SThomas Huth 	int r, srcu_idx;
299741408c28SThomas Huth 	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
299841408c28SThomas Huth 				    | KVM_S390_MEMOP_F_CHECK_ONLY;
299941408c28SThomas Huth 
300041408c28SThomas Huth 	if (mop->flags & ~supported_flags)
300141408c28SThomas Huth 		return -EINVAL;
300241408c28SThomas Huth 
300341408c28SThomas Huth 	if (mop->size > MEM_OP_MAX_SIZE)
300441408c28SThomas Huth 		return -E2BIG;
300541408c28SThomas Huth 
	/* A bounce buffer is only needed if data is actually transferred. */
300641408c28SThomas Huth 	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
300741408c28SThomas Huth 		tmpbuf = vmalloc(mop->size);
300841408c28SThomas Huth 		if (!tmpbuf)
300941408c28SThomas Huth 			return -ENOMEM;
301041408c28SThomas Huth 	}
301141408c28SThomas Huth 
	/* Guest memory access requires the memslot srcu lock to be held. */
301241408c28SThomas Huth 	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
301341408c28SThomas Huth 
301441408c28SThomas Huth 	switch (mop->op) {
301541408c28SThomas Huth 	case KVM_S390_MEMOP_LOGICAL_READ:
301641408c28SThomas Huth 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
301792c96321SDavid Hildenbrand 			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
301892c96321SDavid Hildenbrand 					    mop->size, GACC_FETCH);
301941408c28SThomas Huth 			break;
302041408c28SThomas Huth 		}
302141408c28SThomas Huth 		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
302241408c28SThomas Huth 		if (r == 0) {
302341408c28SThomas Huth 			if (copy_to_user(uaddr, tmpbuf, mop->size))
302441408c28SThomas Huth 				r = -EFAULT;
302541408c28SThomas Huth 		}
302641408c28SThomas Huth 		break;
302741408c28SThomas Huth 	case KVM_S390_MEMOP_LOGICAL_WRITE:
302841408c28SThomas Huth 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
302992c96321SDavid Hildenbrand 			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
303092c96321SDavid Hildenbrand 					    mop->size, GACC_STORE);
303141408c28SThomas Huth 			break;
303241408c28SThomas Huth 		}
303341408c28SThomas Huth 		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
303441408c28SThomas Huth 			r = -EFAULT;
303541408c28SThomas Huth 			break;
303641408c28SThomas Huth 		}
303741408c28SThomas Huth 		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
303841408c28SThomas Huth 		break;
303941408c28SThomas Huth 	default:
304041408c28SThomas Huth 		r = -EINVAL;
304141408c28SThomas Huth 	}
304241408c28SThomas Huth 
304341408c28SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
304441408c28SThomas Huth 
	/* r > 0 means a program exception occurred during the access. */
304541408c28SThomas Huth 	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
304641408c28SThomas Huth 		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
304741408c28SThomas Huth 
304841408c28SThomas Huth 	vfree(tmpbuf);
304941408c28SThomas Huth 	return r;
305041408c28SThomas Huth }
305141408c28SThomas Huth 
/*
 * Dispatcher for the s390-specific vcpu ioctls (interrupt injection,
 * status storing, resets, one-reg access, ucontrol address space
 * mapping, mem_op and irq state save/restore).
 * Returns 0 / positive result on success, negative errno on failure,
 * -ENOTTY for unknown ioctls.
 */
3052b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp,
3053b0c632dbSHeiko Carstens 			 unsigned int ioctl, unsigned long arg)
3054b0c632dbSHeiko Carstens {
3055b0c632dbSHeiko Carstens 	struct kvm_vcpu *vcpu = filp->private_data;
3056b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
3057800c1065SThomas Huth 	int idx;
3058bc923cc9SAvi Kivity 	long r;
3059b0c632dbSHeiko Carstens 
306093736624SAvi Kivity 	switch (ioctl) {
306147b43c52SJens Freimann 	case KVM_S390_IRQ: {
306247b43c52SJens Freimann 		struct kvm_s390_irq s390irq;
306347b43c52SJens Freimann 
306447b43c52SJens Freimann 		r = -EFAULT;
306547b43c52SJens Freimann 		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
306647b43c52SJens Freimann 			break;
306747b43c52SJens Freimann 		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
306847b43c52SJens Freimann 		break;
306947b43c52SJens Freimann 	}
	/* Older interrupt interface: converted to a kvm_s390_irq first. */
307093736624SAvi Kivity 	case KVM_S390_INTERRUPT: {
3071ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
3072383d0b05SJens Freimann 		struct kvm_s390_irq s390irq;
3073ba5c1e9bSCarsten Otte 
307493736624SAvi Kivity 		r = -EFAULT;
3075ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
307693736624SAvi Kivity 			break;
3077383d0b05SJens Freimann 		if (s390int_to_s390irq(&s390int, &s390irq))
3078383d0b05SJens Freimann 			return -EINVAL;
3079383d0b05SJens Freimann 		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
308093736624SAvi Kivity 		break;
3081ba5c1e9bSCarsten Otte 	}
3082b0c632dbSHeiko Carstens 	case KVM_S390_STORE_STATUS:
	/* Storing status accesses guest memory -> take the memslot srcu lock. */
3083800c1065SThomas Huth 		idx = srcu_read_lock(&vcpu->kvm->srcu);
3084bc923cc9SAvi Kivity 		r = kvm_s390_vcpu_store_status(vcpu, arg);
3085800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
3086bc923cc9SAvi Kivity 		break;
3087b0c632dbSHeiko Carstens 	case KVM_S390_SET_INITIAL_PSW: {
3088b0c632dbSHeiko Carstens 		psw_t psw;
3089b0c632dbSHeiko Carstens 
3090bc923cc9SAvi Kivity 		r = -EFAULT;
3091b0c632dbSHeiko Carstens 		if (copy_from_user(&psw, argp, sizeof(psw)))
3092bc923cc9SAvi Kivity 			break;
3093bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
3094bc923cc9SAvi Kivity 		break;
3095b0c632dbSHeiko Carstens 	}
3096b0c632dbSHeiko Carstens 	case KVM_S390_INITIAL_RESET:
3097bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
3098bc923cc9SAvi Kivity 		break;
309914eebd91SCarsten Otte 	case KVM_SET_ONE_REG:
310014eebd91SCarsten Otte 	case KVM_GET_ONE_REG: {
310114eebd91SCarsten Otte 		struct kvm_one_reg reg;
310214eebd91SCarsten Otte 		r = -EFAULT;
310314eebd91SCarsten Otte 		if (copy_from_user(&reg, argp, sizeof(reg)))
310414eebd91SCarsten Otte 			break;
310514eebd91SCarsten Otte 		if (ioctl == KVM_SET_ONE_REG)
310614eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
310714eebd91SCarsten Otte 		else
310814eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
310914eebd91SCarsten Otte 		break;
311014eebd91SCarsten Otte 	}
311127e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
311227e0393fSCarsten Otte 	case KVM_S390_UCAS_MAP: {
311327e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
311427e0393fSCarsten Otte 
311527e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
311627e0393fSCarsten Otte 			r = -EFAULT;
311727e0393fSCarsten Otte 			break;
311827e0393fSCarsten Otte 		}
311927e0393fSCarsten Otte 
	/* UCAS mapping only makes sense for user-controlled VMs. */
312027e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
312127e0393fSCarsten Otte 			r = -EINVAL;
312227e0393fSCarsten Otte 			break;
312327e0393fSCarsten Otte 		}
312427e0393fSCarsten Otte 
312527e0393fSCarsten Otte 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
312627e0393fSCarsten Otte 				     ucasmap.vcpu_addr, ucasmap.length);
312727e0393fSCarsten Otte 		break;
312827e0393fSCarsten Otte 	}
312927e0393fSCarsten Otte 	case KVM_S390_UCAS_UNMAP: {
313027e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
313127e0393fSCarsten Otte 
313227e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
313327e0393fSCarsten Otte 			r = -EFAULT;
313427e0393fSCarsten Otte 			break;
313527e0393fSCarsten Otte 		}
313627e0393fSCarsten Otte 
313727e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
313827e0393fSCarsten Otte 			r = -EINVAL;
313927e0393fSCarsten Otte 			break;
314027e0393fSCarsten Otte 		}
314127e0393fSCarsten Otte 
314227e0393fSCarsten Otte 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
314327e0393fSCarsten Otte 			ucasmap.length);
314427e0393fSCarsten Otte 		break;
314527e0393fSCarsten Otte 	}
314627e0393fSCarsten Otte #endif
3147ccc7910fSCarsten Otte 	case KVM_S390_VCPU_FAULT: {
3148527e30b4SMartin Schwidefsky 		r = gmap_fault(vcpu->arch.gmap, arg, 0);
3149ccc7910fSCarsten Otte 		break;
3150ccc7910fSCarsten Otte 	}
3151d6712df9SCornelia Huck 	case KVM_ENABLE_CAP:
3152d6712df9SCornelia Huck 	{
3153d6712df9SCornelia Huck 		struct kvm_enable_cap cap;
3154d6712df9SCornelia Huck 		r = -EFAULT;
3155d6712df9SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
3156d6712df9SCornelia Huck 			break;
3157d6712df9SCornelia Huck 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
3158d6712df9SCornelia Huck 		break;
3159d6712df9SCornelia Huck 	}
316041408c28SThomas Huth 	case KVM_S390_MEM_OP: {
316141408c28SThomas Huth 		struct kvm_s390_mem_op mem_op;
316241408c28SThomas Huth 
316341408c28SThomas Huth 		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
316441408c28SThomas Huth 			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
316541408c28SThomas Huth 		else
316641408c28SThomas Huth 			r = -EFAULT;
316741408c28SThomas Huth 		break;
316841408c28SThomas Huth 	}
3169816c7667SJens Freimann 	case KVM_S390_SET_IRQ_STATE: {
3170816c7667SJens Freimann 		struct kvm_s390_irq_state irq_state;
3171816c7667SJens Freimann 
3172816c7667SJens Freimann 		r = -EFAULT;
3173816c7667SJens Freimann 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3174816c7667SJens Freimann 			break;
	/* The buffer must hold a whole number of kvm_s390_irq entries. */
3175816c7667SJens Freimann 		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
3176816c7667SJens Freimann 		    irq_state.len == 0 ||
3177816c7667SJens Freimann 		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
3178816c7667SJens Freimann 			r = -EINVAL;
3179816c7667SJens Freimann 			break;
3180816c7667SJens Freimann 		}
3181816c7667SJens Freimann 		r = kvm_s390_set_irq_state(vcpu,
3182816c7667SJens Freimann 					   (void __user *) irq_state.buf,
3183816c7667SJens Freimann 					   irq_state.len);
3184816c7667SJens Freimann 		break;
3185816c7667SJens Freimann 	}
3186816c7667SJens Freimann 	case KVM_S390_GET_IRQ_STATE: {
3187816c7667SJens Freimann 		struct kvm_s390_irq_state irq_state;
3188816c7667SJens Freimann 
3189816c7667SJens Freimann 		r = -EFAULT;
3190816c7667SJens Freimann 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3191816c7667SJens Freimann 			break;
3192816c7667SJens Freimann 		if (irq_state.len == 0) {
3193816c7667SJens Freimann 			r = -EINVAL;
3194816c7667SJens Freimann 			break;
3195816c7667SJens Freimann 		}
3196816c7667SJens Freimann 		r = kvm_s390_get_irq_state(vcpu,
3197816c7667SJens Freimann 					   (__u8 __user *)  irq_state.buf,
3198816c7667SJens Freimann 					   irq_state.len);
3199816c7667SJens Freimann 		break;
3200816c7667SJens Freimann 	}
3201b0c632dbSHeiko Carstens 	default:
32023e6afcf1SCarsten Otte 		r = -ENOTTY;
3203b0c632dbSHeiko Carstens 	}
3204bc923cc9SAvi Kivity 	return r;
3205b0c632dbSHeiko Carstens }
3206b0c632dbSHeiko Carstens 
/*
 * Fault handler for mmap() on a vcpu fd.  Only user-controlled VMs may
 * map anything: the SIE control block page at KVM_S390_SIE_PAGE_OFFSET.
 * Everything else gets SIGBUS.
 */
32075b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
32085b1c1493SCarsten Otte {
32095b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
32105b1c1493SCarsten Otte 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
32115b1c1493SCarsten Otte 		 && (kvm_is_ucontrol(vcpu->kvm))) {
32125b1c1493SCarsten Otte 		vmf->page = virt_to_page(vcpu->arch.sie_block);
32135b1c1493SCarsten Otte 		get_page(vmf->page);
32145b1c1493SCarsten Otte 		return 0;
32155b1c1493SCarsten Otte 	}
32165b1c1493SCarsten Otte #endif
32175b1c1493SCarsten Otte 	return VM_FAULT_SIGBUS;
32185b1c1493SCarsten Otte }
32195b1c1493SCarsten Otte 
/* s390 needs no arch-specific per-memslot data; nothing to allocate. */
32205587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
32215587027cSAneesh Kumar K.V 			    unsigned long npages)
3222db3fe4ebSTakuya Yoshikawa {
3223db3fe4ebSTakuya Yoshikawa 	return 0;
3224db3fe4ebSTakuya Yoshikawa }
3225db3fe4ebSTakuya Yoshikawa 
3226b0c632dbSHeiko Carstens /* Section: memory related */
/*
 * Validate a memslot change before it is committed: userspace address,
 * size and resulting guest physical end must respect the 1 MB segment
 * granularity and the configured guest memory limit.
 * Returns 0 if the slot is acceptable, -EINVAL otherwise.
 */
3227f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
3228f7784b8eSMarcelo Tosatti 				   struct kvm_memory_slot *memslot,
322909170a49SPaolo Bonzini 				   const struct kvm_userspace_memory_region *mem,
32307b6195a9STakuya Yoshikawa 				   enum kvm_mr_change change)
3231b0c632dbSHeiko Carstens {
3232dd2887e7SNick Wang 	/* A few sanity checks. We can have memory slots which have to be
3233dd2887e7SNick Wang 	   located/ended at a segment boundary (1MB). The memory in userland is
3234dd2887e7SNick Wang 	   ok to be fragmented into various different vmas. It is okay to mmap()
3235dd2887e7SNick Wang 	   and munmap() stuff in this slot after doing this call at any time */
3236b0c632dbSHeiko Carstens 
3237598841caSCarsten Otte 	if (mem->userspace_addr & 0xffffful)
3238b0c632dbSHeiko Carstens 		return -EINVAL;
3239b0c632dbSHeiko Carstens 
3240598841caSCarsten Otte 	if (mem->memory_size & 0xffffful)
3241b0c632dbSHeiko Carstens 		return -EINVAL;
3242b0c632dbSHeiko Carstens 
3243a3a92c31SDominik Dingel 	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
3244a3a92c31SDominik Dingel 		return -EINVAL;
3245a3a92c31SDominik Dingel 
3246f7784b8eSMarcelo Tosatti 	return 0;
3247f7784b8eSMarcelo Tosatti }
3248f7784b8eSMarcelo Tosatti 
/*
 * Commit a memslot change by (re)mapping the userspace address range
 * into the guest mapping (gmap).  Skipped entirely when the slot's
 * basics are unchanged, to avoid needless segment translation
 * exceptions on running vcpus.  A mapping failure is only logged;
 * this callback cannot return an error.
 */
3249f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
325009170a49SPaolo Bonzini 				const struct kvm_userspace_memory_region *mem,
32518482644aSTakuya Yoshikawa 				const struct kvm_memory_slot *old,
3252f36f3f28SPaolo Bonzini 				const struct kvm_memory_slot *new,
32538482644aSTakuya Yoshikawa 				enum kvm_mr_change change)
3254f7784b8eSMarcelo Tosatti {
3255f7850c92SCarsten Otte 	int rc;
3256f7784b8eSMarcelo Tosatti 
32572cef4debSChristian Borntraeger 	/* If the basics of the memslot do not change, we do not want
32582cef4debSChristian Borntraeger 	 * to update the gmap. Every update causes several unnecessary
32592cef4debSChristian Borntraeger 	 * segment translation exceptions. This is usually handled just
32602cef4debSChristian Borntraeger 	 * fine by the normal fault handler + gmap, but it will also
32612cef4debSChristian Borntraeger 	 * cause faults on the prefix page of running guest CPUs.
32622cef4debSChristian Borntraeger 	 */
32632cef4debSChristian Borntraeger 	if (old->userspace_addr == mem->userspace_addr &&
32642cef4debSChristian Borntraeger 	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
32652cef4debSChristian Borntraeger 	    old->npages * PAGE_SIZE == mem->memory_size)
32662cef4debSChristian Borntraeger 		return;
3267598841caSCarsten Otte 
3268598841caSCarsten Otte 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
3269598841caSCarsten Otte 		mem->guest_phys_addr, mem->memory_size);
3270598841caSCarsten Otte 	if (rc)
3271ea2cdd27SDavid Hildenbrand 		pr_warn("failed to commit memory region\n");
3272598841caSCarsten Otte 	return;
3273b0c632dbSHeiko Carstens }
3274b0c632dbSHeiko Carstens 
/*
 * Build a facility-list mask for word @i from the SCLP host-managed
 * facility indication (sclp.hmfai): the two hmfai bits for this word
 * select how far the 0x0000ffffffffffffUL pattern is shifted right.
 * NOTE(review): exact hmfai bit semantics are defined by SCLP docs,
 * not visible here — confirm against the SCLP interface description.
 */
327560a37709SAlexander Yarygin static inline unsigned long nonhyp_mask(int i)
327660a37709SAlexander Yarygin {
327760a37709SAlexander Yarygin 	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
327860a37709SAlexander Yarygin 
327960a37709SAlexander Yarygin 	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
328060a37709SAlexander Yarygin }
328160a37709SAlexander Yarygin 
/* Reset the halt-polling wakeup hint when the vcpu leaves blocked state. */
32823491caf2SChristian Borntraeger void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
32833491caf2SChristian Borntraeger {
32843491caf2SChristian Borntraeger 	vcpu->valid_wakeup = false;
32853491caf2SChristian Borntraeger }
32863491caf2SChristian Borntraeger 
/*
 * Module init: refuse to load when the machine cannot run SIE
 * (no sclp.has_sief2), restrict the exported facility list mask to
 * non-hypervisor-managed facilities, then register with the KVM core.
 * Returns 0 on success, -ENODEV or the kvm_init() error otherwise.
 */
3287b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
3288b0c632dbSHeiko Carstens {
328960a37709SAlexander Yarygin 	int i;
329060a37709SAlexander Yarygin 
329107197fd0SDavid Hildenbrand 	if (!sclp.has_sief2) {
329207197fd0SDavid Hildenbrand 		pr_info("SIE not available\n");
329307197fd0SDavid Hildenbrand 		return -ENODEV;
329407197fd0SDavid Hildenbrand 	}
329507197fd0SDavid Hildenbrand 
	/* Only expose facilities the host's stfle list actually offers. */
329660a37709SAlexander Yarygin 	for (i = 0; i < 16; i++)
329760a37709SAlexander Yarygin 		kvm_s390_fac_list_mask[i] |=
329860a37709SAlexander Yarygin 			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
329960a37709SAlexander Yarygin 
33009d8d5786SMichael Mueller 	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
3301b0c632dbSHeiko Carstens }
3302b0c632dbSHeiko Carstens 
/* Module exit: unregister from the KVM core. */
3303b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void)
3304b0c632dbSHeiko Carstens {
3305b0c632dbSHeiko Carstens 	kvm_exit();
3306b0c632dbSHeiko Carstens }
3307b0c632dbSHeiko Carstens 
/* Register module entry/exit points. */
3308b0c632dbSHeiko Carstens module_init(kvm_s390_init);
3309b0c632dbSHeiko Carstens module_exit(kvm_s390_exit);
3310566af940SCornelia Huck 
3311566af940SCornelia Huck /*
3312566af940SCornelia Huck  * Enable autoloading of the kvm module.
3313566af940SCornelia Huck  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
3314566af940SCornelia Huck  * since x86 takes a different approach.
3315566af940SCornelia Huck  */
3316566af940SCornelia Huck #include <linux/miscdevice.h>
3317566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR);
3318566af940SCornelia Huck MODULE_ALIAS("devname:kvm");
3319