xref: /openbmc/linux/arch/s390/kvm/kvm-s390.c (revision 6edaa5307f3f51e4e56dc4c63f68a69d88c6ddf5)
1b0c632dbSHeiko Carstens /*
2a53c8fabSHeiko Carstens  * hosting zSeries kernel virtual machines
3b0c632dbSHeiko Carstens  *
4628eb9b8SChristian Ehrhardt  * Copyright IBM Corp. 2008, 2009
5b0c632dbSHeiko Carstens  *
6b0c632dbSHeiko Carstens  * This program is free software; you can redistribute it and/or modify
7b0c632dbSHeiko Carstens  * it under the terms of the GNU General Public License (version 2 only)
8b0c632dbSHeiko Carstens  * as published by the Free Software Foundation.
9b0c632dbSHeiko Carstens  *
10b0c632dbSHeiko Carstens  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11b0c632dbSHeiko Carstens  *               Christian Borntraeger <borntraeger@de.ibm.com>
12b0c632dbSHeiko Carstens  *               Heiko Carstens <heiko.carstens@de.ibm.com>
13628eb9b8SChristian Ehrhardt  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
1415f36ebdSJason J. Herne  *               Jason J. Herne <jjherne@us.ibm.com>
15b0c632dbSHeiko Carstens  */
16b0c632dbSHeiko Carstens 
17b0c632dbSHeiko Carstens #include <linux/compiler.h>
18b0c632dbSHeiko Carstens #include <linux/err.h>
19b0c632dbSHeiko Carstens #include <linux/fs.h>
20ca872302SChristian Borntraeger #include <linux/hrtimer.h>
21b0c632dbSHeiko Carstens #include <linux/init.h>
22b0c632dbSHeiko Carstens #include <linux/kvm.h>
23b0c632dbSHeiko Carstens #include <linux/kvm_host.h>
24b2d73b2aSMartin Schwidefsky #include <linux/mman.h>
25b0c632dbSHeiko Carstens #include <linux/module.h>
26a374e892STony Krowiak #include <linux/random.h>
27b0c632dbSHeiko Carstens #include <linux/slab.h>
28ba5c1e9bSCarsten Otte #include <linux/timer.h>
2941408c28SThomas Huth #include <linux/vmalloc.h>
3015c9705fSDavid Hildenbrand #include <linux/bitmap.h>
31cbb870c8SHeiko Carstens #include <asm/asm-offsets.h>
32b0c632dbSHeiko Carstens #include <asm/lowcore.h>
33fdf03650SFan Zhang #include <asm/etr.h>
34b0c632dbSHeiko Carstens #include <asm/pgtable.h>
351e133ab2SMartin Schwidefsky #include <asm/gmap.h>
36f5daba1dSHeiko Carstens #include <asm/nmi.h>
37a0616cdeSDavid Howells #include <asm/switch_to.h>
386d3da241SJens Freimann #include <asm/isc.h>
391526bf9cSChristian Borntraeger #include <asm/sclp.h>
400a763c78SDavid Hildenbrand #include <asm/cpacf.h>
410a763c78SDavid Hildenbrand #include <asm/etr.h>
428f2abe6aSChristian Borntraeger #include "kvm-s390.h"
43b0c632dbSHeiko Carstens #include "gaccess.h"
44b0c632dbSHeiko Carstens 
45ea2cdd27SDavid Hildenbrand #define KMSG_COMPONENT "kvm-s390"
46ea2cdd27SDavid Hildenbrand #undef pr_fmt
47ea2cdd27SDavid Hildenbrand #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
48ea2cdd27SDavid Hildenbrand 
495786fffaSCornelia Huck #define CREATE_TRACE_POINTS
505786fffaSCornelia Huck #include "trace.h"
51ade38c31SCornelia Huck #include "trace-s390.h"
525786fffaSCornelia Huck 
5341408c28SThomas Huth #define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
54816c7667SJens Freimann #define LOCAL_IRQS 32
55816c7667SJens Freimann #define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
56816c7667SJens Freimann 			   (KVM_MAX_VCPUS + LOCAL_IRQS))
5741408c28SThomas Huth 
/* Map a debugfs stat file name to its offset inside struct kvm_vcpu. */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/*
 * Per-VCPU statistics exported via debugfs. Each entry pairs the file
 * name shown to userspace with the corresponding counter in the VCPU's
 * stat area; the list is terminated by the { NULL } sentinel.
 */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};
127b0c632dbSHeiko Carstens 
/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[16] = {
	0xffe6000000000000UL,
	0x005e000000000000UL,
};

/*
 * Return the number of u64 entries in kvm_s390_fac_list_mask.
 * The BUILD_BUG_ON guarantees at compile time that the mask never
 * outgrows the architected facility-mask size.
 */
unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

/* notifiers registered in kvm_arch_hardware_setup() */
static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
/* s390 debug feature log used for KVM event tracing */
debug_info_t *kvm_s390_dbf;
1539d8d5786SMichael Mueller 
154b0c632dbSHeiko Carstens /* Section: not file related */
/*
 * Nothing to do here: SIE is always usable on s390, so there is no
 * per-CPU "enable virtualization" step.
 */
int kvm_arch_hardware_enable(void)
{
	return 0;
}
160b0c632dbSHeiko Carstens 
161414d3b07SMartin Schwidefsky static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
162414d3b07SMartin Schwidefsky 			      unsigned long end);
1632c70fe44SChristian Borntraeger 
/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	/* Apply the TOD-clock delta to every VM and every VCPU. */
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			/* keep the running CPU timer consistent with the shifted epoch */
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			/* a shadowed (vsie) control block carries its own epoch */
			if (vcpu->arch.vsie_block)
				vcpu->arch.vsie_block->epoch -= *delta;
		}
	}
	return NOTIFY_OK;
}

/* Invoked via s390_epoch_delta_notifier on host TOD-clock epoch changes. */
static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};
194fdf03650SFan Zhang 
/*
 * One-time module setup: register for gmap pte invalidation events
 * (for regular guests and for vsie shadow gmaps) and for host
 * TOD-clock epoch-delta changes.
 */
int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

/* Mirror of kvm_arch_hardware_setup(): unregister all notifiers. */
void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}
213b0c632dbSHeiko Carstens 
/* Mark cpu feature @nr as available for guests (MSB-0 bit numbering). */
static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

/*
 * Test whether PERFORM LOCKED OPERATION subfunction @nr exists on this
 * machine. Setting bit 0x100 in the function code selects "test bit"
 * mode, in which PLO only reports availability via the condition code.
 * Returns nonzero (cc == 0) if the subfunction is available.
 */
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc = 3; /* subfunction not available */

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}
2340a763c78SDavid Hildenbrand 
/*
 * Probe which CPU subfunctions and SIE features the host provides and
 * record them (kvm_s390_available_subfunc / kvm_s390_available_cpu_feat)
 * for later exposure to guests.
 */
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	/* query all 256 PLO subfunctions via "test bit" */
	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		etr_ptff(kvm_s390_available_subfunc.ptff, ETR_PTFF_QAF);

	/* query the crypto (CPACF) subfunctions per message-security facility */
	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PPNO, kvm_s390_available_subfunc.ppno);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	/* nested virtualization (vsie) prerequisites are met */
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}
30822be5a13SDavid Hildenbrand 
309b0c632dbSHeiko Carstens int kvm_arch_init(void *opaque)
310b0c632dbSHeiko Carstens {
31178f26131SChristian Borntraeger 	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
31278f26131SChristian Borntraeger 	if (!kvm_s390_dbf)
31378f26131SChristian Borntraeger 		return -ENOMEM;
31478f26131SChristian Borntraeger 
31578f26131SChristian Borntraeger 	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
31678f26131SChristian Borntraeger 		debug_unregister(kvm_s390_dbf);
31778f26131SChristian Borntraeger 		return -ENOMEM;
31878f26131SChristian Borntraeger 	}
31978f26131SChristian Borntraeger 
32022be5a13SDavid Hildenbrand 	kvm_s390_cpu_feat_init();
32122be5a13SDavid Hildenbrand 
32284877d93SCornelia Huck 	/* Register floating interrupt controller interface. */
32384877d93SCornelia Huck 	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
324b0c632dbSHeiko Carstens }
325b0c632dbSHeiko Carstens 
/* Module teardown: release the debug log created in kvm_arch_init(). */
void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}
33078f26131SChristian Borntraeger 
331b0c632dbSHeiko Carstens /* Section: device related */
332b0c632dbSHeiko Carstens long kvm_arch_dev_ioctl(struct file *filp,
333b0c632dbSHeiko Carstens 			unsigned int ioctl, unsigned long arg)
334b0c632dbSHeiko Carstens {
335b0c632dbSHeiko Carstens 	if (ioctl == KVM_S390_ENABLE_SIE)
336b0c632dbSHeiko Carstens 		return s390_enable_sie();
337b0c632dbSHeiko Carstens 	return -EINVAL;
338b0c632dbSHeiko Carstens }
339b0c632dbSHeiko Carstens 
/*
 * KVM_CHECK_EXTENSION: report whether (and to what degree) a capability
 * is supported. Returns 0 for unknown capabilities, 1 or a
 * capability-specific value otherwise.
 */
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	/* capabilities always available on s390 */
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		/* maximum transfer size for a single KVM_S390_MEM_OP */
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		/* the extended SCA allows more VCPUs if the hardware supports it */
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	/* hardware-dependent capabilities */
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	default:
		r = 0;
	}
	return r;
}
395b0c632dbSHeiko Carstens 
39615f36ebdSJason J. Herne static void kvm_s390_sync_dirty_log(struct kvm *kvm,
39715f36ebdSJason J. Herne 					struct kvm_memory_slot *memslot)
39815f36ebdSJason J. Herne {
39915f36ebdSJason J. Herne 	gfn_t cur_gfn, last_gfn;
40015f36ebdSJason J. Herne 	unsigned long address;
40115f36ebdSJason J. Herne 	struct gmap *gmap = kvm->arch.gmap;
40215f36ebdSJason J. Herne 
40315f36ebdSJason J. Herne 	/* Loop over all guest pages */
40415f36ebdSJason J. Herne 	last_gfn = memslot->base_gfn + memslot->npages;
40515f36ebdSJason J. Herne 	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
40615f36ebdSJason J. Herne 		address = gfn_to_hva_memslot(memslot, cur_gfn);
40715f36ebdSJason J. Herne 
4081e133ab2SMartin Schwidefsky 		if (test_and_clear_guest_dirty(gmap->mm, address))
40915f36ebdSJason J. Herne 			mark_page_dirty(kvm, cur_gfn);
4101763f8d0SChristian Borntraeger 		if (fatal_signal_pending(current))
4111763f8d0SChristian Borntraeger 			return;
41270c88a00SChristian Borntraeger 		cond_resched();
41315f36ebdSJason J. Herne 	}
41415f36ebdSJason J. Herne }
41515f36ebdSJason J. Herne 
416b0c632dbSHeiko Carstens /* Section: vm related */
417a6e2f683SEugene (jno) Dvurechenski static void sca_del_vcpu(struct kvm_vcpu *vcpu);
418a6e2f683SEugene (jno) Dvurechenski 
419b0c632dbSHeiko Carstens /*
420b0c632dbSHeiko Carstens  * Get (and clear) the dirty memory log for a memory slot.
421b0c632dbSHeiko Carstens  */
422b0c632dbSHeiko Carstens int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
423b0c632dbSHeiko Carstens 			       struct kvm_dirty_log *log)
424b0c632dbSHeiko Carstens {
42515f36ebdSJason J. Herne 	int r;
42615f36ebdSJason J. Herne 	unsigned long n;
4279f6b8029SPaolo Bonzini 	struct kvm_memslots *slots;
42815f36ebdSJason J. Herne 	struct kvm_memory_slot *memslot;
42915f36ebdSJason J. Herne 	int is_dirty = 0;
43015f36ebdSJason J. Herne 
43115f36ebdSJason J. Herne 	mutex_lock(&kvm->slots_lock);
43215f36ebdSJason J. Herne 
43315f36ebdSJason J. Herne 	r = -EINVAL;
43415f36ebdSJason J. Herne 	if (log->slot >= KVM_USER_MEM_SLOTS)
43515f36ebdSJason J. Herne 		goto out;
43615f36ebdSJason J. Herne 
4379f6b8029SPaolo Bonzini 	slots = kvm_memslots(kvm);
4389f6b8029SPaolo Bonzini 	memslot = id_to_memslot(slots, log->slot);
43915f36ebdSJason J. Herne 	r = -ENOENT;
44015f36ebdSJason J. Herne 	if (!memslot->dirty_bitmap)
44115f36ebdSJason J. Herne 		goto out;
44215f36ebdSJason J. Herne 
44315f36ebdSJason J. Herne 	kvm_s390_sync_dirty_log(kvm, memslot);
44415f36ebdSJason J. Herne 	r = kvm_get_dirty_log(kvm, log, &is_dirty);
44515f36ebdSJason J. Herne 	if (r)
44615f36ebdSJason J. Herne 		goto out;
44715f36ebdSJason J. Herne 
44815f36ebdSJason J. Herne 	/* Clear the dirty log */
44915f36ebdSJason J. Herne 	if (is_dirty) {
45015f36ebdSJason J. Herne 		n = kvm_dirty_bitmap_bytes(memslot);
45115f36ebdSJason J. Herne 		memset(memslot->dirty_bitmap, 0, n);
45215f36ebdSJason J. Herne 	}
45315f36ebdSJason J. Herne 	r = 0;
45415f36ebdSJason J. Herne out:
45515f36ebdSJason J. Herne 	mutex_unlock(&kvm->slots_lock);
45615f36ebdSJason J. Herne 	return r;
457b0c632dbSHeiko Carstens }
458b0c632dbSHeiko Carstens 
/*
 * KVM_ENABLE_CAP on a VM: opt in to optional capabilities.
 * Capabilities that change the guest-visible facility lists may only
 * be enabled before any VCPU has been created (-EBUSY otherwise).
 * Returns 0 on success or a negative error code.
 */
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		/* facility 129 (vector) — only before VCPU creation */
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		/* facility 64 (runtime instrumentation) — only before VCPU creation */
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
516d938dc55SCornelia Huck 
5178c0a7ce6SDominik Dingel static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
5188c0a7ce6SDominik Dingel {
5198c0a7ce6SDominik Dingel 	int ret;
5208c0a7ce6SDominik Dingel 
5218c0a7ce6SDominik Dingel 	switch (attr->attr) {
5228c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE:
5238c0a7ce6SDominik Dingel 		ret = 0;
524c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
525a3a92c31SDominik Dingel 			 kvm->arch.mem_limit);
526a3a92c31SDominik Dingel 		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
5278c0a7ce6SDominik Dingel 			ret = -EFAULT;
5288c0a7ce6SDominik Dingel 		break;
5298c0a7ce6SDominik Dingel 	default:
5308c0a7ce6SDominik Dingel 		ret = -ENXIO;
5318c0a7ce6SDominik Dingel 		break;
5328c0a7ce6SDominik Dingel 	}
5338c0a7ce6SDominik Dingel 	return ret;
5348c0a7ce6SDominik Dingel }
5358c0a7ce6SDominik Dingel 
/*
 * Set a KVM_S390_VM_MEM_CTRL attribute: enable CMMA, reset the CMMA
 * state of the guest, or change the guest memory limit.
 *
 * Returns 0 on success or a negative error code (-ENXIO if the
 * attribute/facility is unavailable, -EBUSY if vcpus already exist,
 * -EINVAL/-EFAULT/-E2BIG/-ENOMEM for the limit path).
 */
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		/* CMMA requires SCLP-reported hardware support. */
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		/* CMMA may only be enabled before the first vcpu is created. */
		if (!kvm->created_vcpus) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		/* Resetting CMMA state only makes sense once it is enabled. */
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		/* srcu read lock protects against concurrent memslot changes. */
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		/* ucontrol VMs manage their gmaps themselves. */
		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		/* The limit may only be lowered, never raised. */
		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		/* The gmap can only be replaced before any vcpu exists. */
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				/* Swap in the new gmap and drop the old one. */
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
6184f718eabSDominik Dingel 
619a374e892STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
620a374e892STony Krowiak 
/*
 * Set a KVM_S390_VM_CRYPTO attribute: enable or disable AES/DEA key
 * wrapping for the whole VM.
 *
 * Enabling generates a fresh random wrapping key mask in the CRYCB;
 * disabling clears the mask.  Afterwards every vcpu is kicked out of
 * SIE so it picks up the new crypto setup on reentry.
 *
 * Returns 0 on success, -EINVAL if facility 76 (MSA extension 3) is
 * not available to the guest, -ENXIO for an unknown attribute.
 */
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	/* Facility 76 provides the message-security-assist extension 3. */
	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		/* Clear the mask so no stale wrapping key material remains. */
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	/* Refresh each vcpu's crypto setup and force it out of SIE. */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
669a374e892STony Krowiak 
67072f25020SJason J. Herne static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
67172f25020SJason J. Herne {
67272f25020SJason J. Herne 	u8 gtod_high;
67372f25020SJason J. Herne 
67472f25020SJason J. Herne 	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
67572f25020SJason J. Herne 					   sizeof(gtod_high)))
67672f25020SJason J. Herne 		return -EFAULT;
67772f25020SJason J. Herne 
67872f25020SJason J. Herne 	if (gtod_high != 0)
67972f25020SJason J. Herne 		return -EINVAL;
68058c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
68172f25020SJason J. Herne 
68272f25020SJason J. Herne 	return 0;
68372f25020SJason J. Herne }
68472f25020SJason J. Herne 
68572f25020SJason J. Herne static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
68672f25020SJason J. Herne {
6875a3d883aSDavid Hildenbrand 	u64 gtod;
68872f25020SJason J. Herne 
68972f25020SJason J. Herne 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
69072f25020SJason J. Herne 		return -EFAULT;
69172f25020SJason J. Herne 
69225ed1675SDavid Hildenbrand 	kvm_s390_set_tod_clock(kvm, gtod);
69358c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
69472f25020SJason J. Herne 	return 0;
69572f25020SJason J. Herne }
69672f25020SJason J. Herne 
69772f25020SJason J. Herne static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
69872f25020SJason J. Herne {
69972f25020SJason J. Herne 	int ret;
70072f25020SJason J. Herne 
70172f25020SJason J. Herne 	if (attr->flags)
70272f25020SJason J. Herne 		return -EINVAL;
70372f25020SJason J. Herne 
70472f25020SJason J. Herne 	switch (attr->attr) {
70572f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
70672f25020SJason J. Herne 		ret = kvm_s390_set_tod_high(kvm, attr);
70772f25020SJason J. Herne 		break;
70872f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
70972f25020SJason J. Herne 		ret = kvm_s390_set_tod_low(kvm, attr);
71072f25020SJason J. Herne 		break;
71172f25020SJason J. Herne 	default:
71272f25020SJason J. Herne 		ret = -ENXIO;
71372f25020SJason J. Herne 		break;
71472f25020SJason J. Herne 	}
71572f25020SJason J. Herne 	return ret;
71672f25020SJason J. Herne }
71772f25020SJason J. Herne 
71872f25020SJason J. Herne static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
71972f25020SJason J. Herne {
72072f25020SJason J. Herne 	u8 gtod_high = 0;
72172f25020SJason J. Herne 
72272f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod_high,
72372f25020SJason J. Herne 					 sizeof(gtod_high)))
72472f25020SJason J. Herne 		return -EFAULT;
72558c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
72672f25020SJason J. Herne 
72772f25020SJason J. Herne 	return 0;
72872f25020SJason J. Herne }
72972f25020SJason J. Herne 
73072f25020SJason J. Herne static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
73172f25020SJason J. Herne {
7325a3d883aSDavid Hildenbrand 	u64 gtod;
73372f25020SJason J. Herne 
73460417fccSDavid Hildenbrand 	gtod = kvm_s390_get_tod_clock_fast(kvm);
73572f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
73672f25020SJason J. Herne 		return -EFAULT;
73758c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
73872f25020SJason J. Herne 
73972f25020SJason J. Herne 	return 0;
74072f25020SJason J. Herne }
74172f25020SJason J. Herne 
74272f25020SJason J. Herne static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
74372f25020SJason J. Herne {
74472f25020SJason J. Herne 	int ret;
74572f25020SJason J. Herne 
74672f25020SJason J. Herne 	if (attr->flags)
74772f25020SJason J. Herne 		return -EINVAL;
74872f25020SJason J. Herne 
74972f25020SJason J. Herne 	switch (attr->attr) {
75072f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
75172f25020SJason J. Herne 		ret = kvm_s390_get_tod_high(kvm, attr);
75272f25020SJason J. Herne 		break;
75372f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
75472f25020SJason J. Herne 		ret = kvm_s390_get_tod_low(kvm, attr);
75572f25020SJason J. Herne 		break;
75672f25020SJason J. Herne 	default:
75772f25020SJason J. Herne 		ret = -ENXIO;
75872f25020SJason J. Herne 		break;
75972f25020SJason J. Herne 	}
76072f25020SJason J. Herne 	return ret;
76172f25020SJason J. Herne }
76272f25020SJason J. Herne 
763658b6edaSMichael Mueller static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
764658b6edaSMichael Mueller {
765658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_processor *proc;
766053dd230SDavid Hildenbrand 	u16 lowest_ibc, unblocked_ibc;
767658b6edaSMichael Mueller 	int ret = 0;
768658b6edaSMichael Mueller 
769658b6edaSMichael Mueller 	mutex_lock(&kvm->lock);
770a03825bbSPaolo Bonzini 	if (kvm->created_vcpus) {
771658b6edaSMichael Mueller 		ret = -EBUSY;
772658b6edaSMichael Mueller 		goto out;
773658b6edaSMichael Mueller 	}
774658b6edaSMichael Mueller 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
775658b6edaSMichael Mueller 	if (!proc) {
776658b6edaSMichael Mueller 		ret = -ENOMEM;
777658b6edaSMichael Mueller 		goto out;
778658b6edaSMichael Mueller 	}
779658b6edaSMichael Mueller 	if (!copy_from_user(proc, (void __user *)attr->addr,
780658b6edaSMichael Mueller 			    sizeof(*proc))) {
7819bb0ec09SDavid Hildenbrand 		kvm->arch.model.cpuid = proc->cpuid;
782053dd230SDavid Hildenbrand 		lowest_ibc = sclp.ibc >> 16 & 0xfff;
783053dd230SDavid Hildenbrand 		unblocked_ibc = sclp.ibc & 0xfff;
784053dd230SDavid Hildenbrand 		if (lowest_ibc) {
785053dd230SDavid Hildenbrand 			if (proc->ibc > unblocked_ibc)
786053dd230SDavid Hildenbrand 				kvm->arch.model.ibc = unblocked_ibc;
787053dd230SDavid Hildenbrand 			else if (proc->ibc < lowest_ibc)
788053dd230SDavid Hildenbrand 				kvm->arch.model.ibc = lowest_ibc;
789053dd230SDavid Hildenbrand 			else
790658b6edaSMichael Mueller 				kvm->arch.model.ibc = proc->ibc;
791053dd230SDavid Hildenbrand 		}
792c54f0d6aSDavid Hildenbrand 		memcpy(kvm->arch.model.fac_list, proc->fac_list,
793658b6edaSMichael Mueller 		       S390_ARCH_FAC_LIST_SIZE_BYTE);
794658b6edaSMichael Mueller 	} else
795658b6edaSMichael Mueller 		ret = -EFAULT;
796658b6edaSMichael Mueller 	kfree(proc);
797658b6edaSMichael Mueller out:
798658b6edaSMichael Mueller 	mutex_unlock(&kvm->lock);
799658b6edaSMichael Mueller 	return ret;
800658b6edaSMichael Mueller }
801658b6edaSMichael Mueller 
80215c9705fSDavid Hildenbrand static int kvm_s390_set_processor_feat(struct kvm *kvm,
80315c9705fSDavid Hildenbrand 				       struct kvm_device_attr *attr)
80415c9705fSDavid Hildenbrand {
80515c9705fSDavid Hildenbrand 	struct kvm_s390_vm_cpu_feat data;
80615c9705fSDavid Hildenbrand 	int ret = -EBUSY;
80715c9705fSDavid Hildenbrand 
80815c9705fSDavid Hildenbrand 	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
80915c9705fSDavid Hildenbrand 		return -EFAULT;
81015c9705fSDavid Hildenbrand 	if (!bitmap_subset((unsigned long *) data.feat,
81115c9705fSDavid Hildenbrand 			   kvm_s390_available_cpu_feat,
81215c9705fSDavid Hildenbrand 			   KVM_S390_VM_CPU_FEAT_NR_BITS))
81315c9705fSDavid Hildenbrand 		return -EINVAL;
81415c9705fSDavid Hildenbrand 
81515c9705fSDavid Hildenbrand 	mutex_lock(&kvm->lock);
81615c9705fSDavid Hildenbrand 	if (!atomic_read(&kvm->online_vcpus)) {
81715c9705fSDavid Hildenbrand 		bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
81815c9705fSDavid Hildenbrand 			    KVM_S390_VM_CPU_FEAT_NR_BITS);
81915c9705fSDavid Hildenbrand 		ret = 0;
82015c9705fSDavid Hildenbrand 	}
82115c9705fSDavid Hildenbrand 	mutex_unlock(&kvm->lock);
82215c9705fSDavid Hildenbrand 	return ret;
82315c9705fSDavid Hildenbrand }
82415c9705fSDavid Hildenbrand 
8250a763c78SDavid Hildenbrand static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
8260a763c78SDavid Hildenbrand 					  struct kvm_device_attr *attr)
8270a763c78SDavid Hildenbrand {
8280a763c78SDavid Hildenbrand 	/*
8290a763c78SDavid Hildenbrand 	 * Once supported by kernel + hw, we have to store the subfunctions
8300a763c78SDavid Hildenbrand 	 * in kvm->arch and remember that user space configured them.
8310a763c78SDavid Hildenbrand 	 */
8320a763c78SDavid Hildenbrand 	return -ENXIO;
8330a763c78SDavid Hildenbrand }
8340a763c78SDavid Hildenbrand 
835658b6edaSMichael Mueller static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
836658b6edaSMichael Mueller {
837658b6edaSMichael Mueller 	int ret = -ENXIO;
838658b6edaSMichael Mueller 
839658b6edaSMichael Mueller 	switch (attr->attr) {
840658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
841658b6edaSMichael Mueller 		ret = kvm_s390_set_processor(kvm, attr);
842658b6edaSMichael Mueller 		break;
84315c9705fSDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
84415c9705fSDavid Hildenbrand 		ret = kvm_s390_set_processor_feat(kvm, attr);
84515c9705fSDavid Hildenbrand 		break;
8460a763c78SDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
8470a763c78SDavid Hildenbrand 		ret = kvm_s390_set_processor_subfunc(kvm, attr);
8480a763c78SDavid Hildenbrand 		break;
849658b6edaSMichael Mueller 	}
850658b6edaSMichael Mueller 	return ret;
851658b6edaSMichael Mueller }
852658b6edaSMichael Mueller 
853658b6edaSMichael Mueller static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
854658b6edaSMichael Mueller {
855658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_processor *proc;
856658b6edaSMichael Mueller 	int ret = 0;
857658b6edaSMichael Mueller 
858658b6edaSMichael Mueller 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
859658b6edaSMichael Mueller 	if (!proc) {
860658b6edaSMichael Mueller 		ret = -ENOMEM;
861658b6edaSMichael Mueller 		goto out;
862658b6edaSMichael Mueller 	}
8639bb0ec09SDavid Hildenbrand 	proc->cpuid = kvm->arch.model.cpuid;
864658b6edaSMichael Mueller 	proc->ibc = kvm->arch.model.ibc;
865c54f0d6aSDavid Hildenbrand 	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
866c54f0d6aSDavid Hildenbrand 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
867658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
868658b6edaSMichael Mueller 		ret = -EFAULT;
869658b6edaSMichael Mueller 	kfree(proc);
870658b6edaSMichael Mueller out:
871658b6edaSMichael Mueller 	return ret;
872658b6edaSMichael Mueller }
873658b6edaSMichael Mueller 
874658b6edaSMichael Mueller static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
875658b6edaSMichael Mueller {
876658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_machine *mach;
877658b6edaSMichael Mueller 	int ret = 0;
878658b6edaSMichael Mueller 
879658b6edaSMichael Mueller 	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
880658b6edaSMichael Mueller 	if (!mach) {
881658b6edaSMichael Mueller 		ret = -ENOMEM;
882658b6edaSMichael Mueller 		goto out;
883658b6edaSMichael Mueller 	}
884658b6edaSMichael Mueller 	get_cpu_id((struct cpuid *) &mach->cpuid);
88537c5f6c8SDavid Hildenbrand 	mach->ibc = sclp.ibc;
886c54f0d6aSDavid Hildenbrand 	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
887981467c9SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
888658b6edaSMichael Mueller 	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
88994422ee8SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
890658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
891658b6edaSMichael Mueller 		ret = -EFAULT;
892658b6edaSMichael Mueller 	kfree(mach);
893658b6edaSMichael Mueller out:
894658b6edaSMichael Mueller 	return ret;
895658b6edaSMichael Mueller }
896658b6edaSMichael Mueller 
89715c9705fSDavid Hildenbrand static int kvm_s390_get_processor_feat(struct kvm *kvm,
89815c9705fSDavid Hildenbrand 				       struct kvm_device_attr *attr)
89915c9705fSDavid Hildenbrand {
90015c9705fSDavid Hildenbrand 	struct kvm_s390_vm_cpu_feat data;
90115c9705fSDavid Hildenbrand 
90215c9705fSDavid Hildenbrand 	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
90315c9705fSDavid Hildenbrand 		    KVM_S390_VM_CPU_FEAT_NR_BITS);
90415c9705fSDavid Hildenbrand 	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
90515c9705fSDavid Hildenbrand 		return -EFAULT;
90615c9705fSDavid Hildenbrand 	return 0;
90715c9705fSDavid Hildenbrand }
90815c9705fSDavid Hildenbrand 
90915c9705fSDavid Hildenbrand static int kvm_s390_get_machine_feat(struct kvm *kvm,
91015c9705fSDavid Hildenbrand 				     struct kvm_device_attr *attr)
91115c9705fSDavid Hildenbrand {
91215c9705fSDavid Hildenbrand 	struct kvm_s390_vm_cpu_feat data;
91315c9705fSDavid Hildenbrand 
91415c9705fSDavid Hildenbrand 	bitmap_copy((unsigned long *) data.feat,
91515c9705fSDavid Hildenbrand 		    kvm_s390_available_cpu_feat,
91615c9705fSDavid Hildenbrand 		    KVM_S390_VM_CPU_FEAT_NR_BITS);
91715c9705fSDavid Hildenbrand 	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
91815c9705fSDavid Hildenbrand 		return -EFAULT;
91915c9705fSDavid Hildenbrand 	return 0;
92015c9705fSDavid Hildenbrand }
92115c9705fSDavid Hildenbrand 
9220a763c78SDavid Hildenbrand static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
9230a763c78SDavid Hildenbrand 					  struct kvm_device_attr *attr)
9240a763c78SDavid Hildenbrand {
9250a763c78SDavid Hildenbrand 	/*
9260a763c78SDavid Hildenbrand 	 * Once we can actually configure subfunctions (kernel + hw support),
9270a763c78SDavid Hildenbrand 	 * we have to check if they were already set by user space, if so copy
9280a763c78SDavid Hildenbrand 	 * them from kvm->arch.
9290a763c78SDavid Hildenbrand 	 */
9300a763c78SDavid Hildenbrand 	return -ENXIO;
9310a763c78SDavid Hildenbrand }
9320a763c78SDavid Hildenbrand 
9330a763c78SDavid Hildenbrand static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
9340a763c78SDavid Hildenbrand 					struct kvm_device_attr *attr)
9350a763c78SDavid Hildenbrand {
9360a763c78SDavid Hildenbrand 	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
9370a763c78SDavid Hildenbrand 	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
9380a763c78SDavid Hildenbrand 		return -EFAULT;
9390a763c78SDavid Hildenbrand 	return 0;
9400a763c78SDavid Hildenbrand }
941658b6edaSMichael Mueller static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
942658b6edaSMichael Mueller {
943658b6edaSMichael Mueller 	int ret = -ENXIO;
944658b6edaSMichael Mueller 
945658b6edaSMichael Mueller 	switch (attr->attr) {
946658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
947658b6edaSMichael Mueller 		ret = kvm_s390_get_processor(kvm, attr);
948658b6edaSMichael Mueller 		break;
949658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MACHINE:
950658b6edaSMichael Mueller 		ret = kvm_s390_get_machine(kvm, attr);
951658b6edaSMichael Mueller 		break;
95215c9705fSDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
95315c9705fSDavid Hildenbrand 		ret = kvm_s390_get_processor_feat(kvm, attr);
95415c9705fSDavid Hildenbrand 		break;
95515c9705fSDavid Hildenbrand 	case KVM_S390_VM_CPU_MACHINE_FEAT:
95615c9705fSDavid Hildenbrand 		ret = kvm_s390_get_machine_feat(kvm, attr);
95715c9705fSDavid Hildenbrand 		break;
9580a763c78SDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
9590a763c78SDavid Hildenbrand 		ret = kvm_s390_get_processor_subfunc(kvm, attr);
9600a763c78SDavid Hildenbrand 		break;
9610a763c78SDavid Hildenbrand 	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
9620a763c78SDavid Hildenbrand 		ret = kvm_s390_get_machine_subfunc(kvm, attr);
9630a763c78SDavid Hildenbrand 		break;
964658b6edaSMichael Mueller 	}
965658b6edaSMichael Mueller 	return ret;
966658b6edaSMichael Mueller }
967658b6edaSMichael Mueller 
968f2061656SDominik Dingel static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
969f2061656SDominik Dingel {
970f2061656SDominik Dingel 	int ret;
971f2061656SDominik Dingel 
972f2061656SDominik Dingel 	switch (attr->group) {
9734f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
9748c0a7ce6SDominik Dingel 		ret = kvm_s390_set_mem_control(kvm, attr);
9754f718eabSDominik Dingel 		break;
97672f25020SJason J. Herne 	case KVM_S390_VM_TOD:
97772f25020SJason J. Herne 		ret = kvm_s390_set_tod(kvm, attr);
97872f25020SJason J. Herne 		break;
979658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
980658b6edaSMichael Mueller 		ret = kvm_s390_set_cpu_model(kvm, attr);
981658b6edaSMichael Mueller 		break;
982a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO:
983a374e892STony Krowiak 		ret = kvm_s390_vm_set_crypto(kvm, attr);
984a374e892STony Krowiak 		break;
985f2061656SDominik Dingel 	default:
986f2061656SDominik Dingel 		ret = -ENXIO;
987f2061656SDominik Dingel 		break;
988f2061656SDominik Dingel 	}
989f2061656SDominik Dingel 
990f2061656SDominik Dingel 	return ret;
991f2061656SDominik Dingel }
992f2061656SDominik Dingel 
993f2061656SDominik Dingel static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
994f2061656SDominik Dingel {
9958c0a7ce6SDominik Dingel 	int ret;
9968c0a7ce6SDominik Dingel 
9978c0a7ce6SDominik Dingel 	switch (attr->group) {
9988c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
9998c0a7ce6SDominik Dingel 		ret = kvm_s390_get_mem_control(kvm, attr);
10008c0a7ce6SDominik Dingel 		break;
100172f25020SJason J. Herne 	case KVM_S390_VM_TOD:
100272f25020SJason J. Herne 		ret = kvm_s390_get_tod(kvm, attr);
100372f25020SJason J. Herne 		break;
1004658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
1005658b6edaSMichael Mueller 		ret = kvm_s390_get_cpu_model(kvm, attr);
1006658b6edaSMichael Mueller 		break;
10078c0a7ce6SDominik Dingel 	default:
10088c0a7ce6SDominik Dingel 		ret = -ENXIO;
10098c0a7ce6SDominik Dingel 		break;
10108c0a7ce6SDominik Dingel 	}
10118c0a7ce6SDominik Dingel 
10128c0a7ce6SDominik Dingel 	return ret;
1013f2061656SDominik Dingel }
1014f2061656SDominik Dingel 
/*
 * KVM_HAS_DEVICE_ATTR handler: report whether the given group/attribute
 * combination is supported by this kernel.  Returns 0 when supported
 * and -ENXIO when not; no VM state is changed.
 */
static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			/* The CMMA attributes depend on SCLP-reported support. */
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
			ret = 0;
			break;
		/* configuring subfunctions is not supported yet */
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
1081f2061656SDominik Dingel 
/*
 * Get guest storage keys (KVM_S390_GET_SKEYS).
 *
 * Reads the storage key of each guest frame starting at
 * args->start_gfn and copies args->count key bytes to user space.
 * Returns KVM_S390_GET_SKEYS_NONE when the guest does not use storage
 * keys at all, so user space can skip the transfer entirely; otherwise
 * 0 on success or a negative error code.
 */
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	/* No flags are defined for this ioctl so far. */
	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	/* Try kmalloc first, fall back to vmalloc for large key arrays. */
	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	/* mmap_sem protects the page table walks done below. */
	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);

	/* Only transfer the keys if all of them could be read. */
	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	/* kvfree() handles both the kmalloc and the vmalloc case. */
	kvfree(keys);
	return r;
}
113030ee2a98SJason J. Herne 
/*
 * Set guest storage keys (KVM_S390_SET_SKEYS).
 *
 * Copies args->count key bytes from user space and applies them to the
 * guest frames starting at args->start_gfn.  Storage key handling is
 * enabled for the guest before the first key is written.  Returns 0 on
 * success or a negative error code.
 */
static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	/* No flags are defined for this ioctl so far. */
	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	/* Try kmalloc first, fall back to vmalloc for large key arrays. */
	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	/* mmap_sem protects the page table walks done below. */
	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);
out:
	/* kvfree() handles both the kmalloc and the vmalloc case. */
	kvfree(keys);
	return r;
}
118630ee2a98SJason J. Herne 
/*
 * kvm_arch_vm_ioctl() - dispatch the s390-specific VM-level ioctls.
 * @filp:  the VM file; filp->private_data is the struct kvm.
 * @ioctl: the request number.
 * @arg:   user-space pointer argument for every request handled here.
 *
 * Returns 0 on success, -EFAULT if the argument cannot be copied in,
 * -ENOTTY for unknown requests, or the error of the invoked handler.
 */
1187b0c632dbSHeiko Carstens long kvm_arch_vm_ioctl(struct file *filp,
1188b0c632dbSHeiko Carstens 		       unsigned int ioctl, unsigned long arg)
1189b0c632dbSHeiko Carstens {
1190b0c632dbSHeiko Carstens 	struct kvm *kvm = filp->private_data;
1191b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
1192f2061656SDominik Dingel 	struct kvm_device_attr attr;
1193b0c632dbSHeiko Carstens 	int r;
1194b0c632dbSHeiko Carstens 
1195b0c632dbSHeiko Carstens 	switch (ioctl) {
	/* Inject a floating (VM-wide) interrupt. */
1196ba5c1e9bSCarsten Otte 	case KVM_S390_INTERRUPT: {
1197ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
1198ba5c1e9bSCarsten Otte 
1199ba5c1e9bSCarsten Otte 		r = -EFAULT;
1200ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
1201ba5c1e9bSCarsten Otte 			break;
1202ba5c1e9bSCarsten Otte 		r = kvm_s390_inject_vm(kvm, &s390int);
1203ba5c1e9bSCarsten Otte 		break;
1204ba5c1e9bSCarsten Otte 	}
1205d938dc55SCornelia Huck 	case KVM_ENABLE_CAP: {
1206d938dc55SCornelia Huck 		struct kvm_enable_cap cap;
1207d938dc55SCornelia Huck 		r = -EFAULT;
1208d938dc55SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
1209d938dc55SCornelia Huck 			break;
1210d938dc55SCornelia Huck 		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
1211d938dc55SCornelia Huck 		break;
1212d938dc55SCornelia Huck 	}
	/* Fails with -EINVAL unless the in-kernel irqchip was enabled. */
121384223598SCornelia Huck 	case KVM_CREATE_IRQCHIP: {
121484223598SCornelia Huck 		struct kvm_irq_routing_entry routing;
121584223598SCornelia Huck 
121684223598SCornelia Huck 		r = -EINVAL;
121784223598SCornelia Huck 		if (kvm->arch.use_irqchip) {
121884223598SCornelia Huck 			/* Set up dummy routing. */
121984223598SCornelia Huck 			memset(&routing, 0, sizeof(routing));
1220152b2839SNicholas Krause 			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
122184223598SCornelia Huck 		}
122284223598SCornelia Huck 		break;
122384223598SCornelia Huck 	}
	/* VM device-attribute interface: set, get and probe attributes. */
1224f2061656SDominik Dingel 	case KVM_SET_DEVICE_ATTR: {
1225f2061656SDominik Dingel 		r = -EFAULT;
1226f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1227f2061656SDominik Dingel 			break;
1228f2061656SDominik Dingel 		r = kvm_s390_vm_set_attr(kvm, &attr);
1229f2061656SDominik Dingel 		break;
1230f2061656SDominik Dingel 	}
1231f2061656SDominik Dingel 	case KVM_GET_DEVICE_ATTR: {
1232f2061656SDominik Dingel 		r = -EFAULT;
1233f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1234f2061656SDominik Dingel 			break;
1235f2061656SDominik Dingel 		r = kvm_s390_vm_get_attr(kvm, &attr);
1236f2061656SDominik Dingel 		break;
1237f2061656SDominik Dingel 	}
1238f2061656SDominik Dingel 	case KVM_HAS_DEVICE_ATTR: {
1239f2061656SDominik Dingel 		r = -EFAULT;
1240f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1241f2061656SDominik Dingel 			break;
1242f2061656SDominik Dingel 		r = kvm_s390_vm_has_attr(kvm, &attr);
1243f2061656SDominik Dingel 		break;
1244f2061656SDominik Dingel 	}
	/* Bulk read/write of guest storage keys. */
124530ee2a98SJason J. Herne 	case KVM_S390_GET_SKEYS: {
124630ee2a98SJason J. Herne 		struct kvm_s390_skeys args;
124730ee2a98SJason J. Herne 
124830ee2a98SJason J. Herne 		r = -EFAULT;
124930ee2a98SJason J. Herne 		if (copy_from_user(&args, argp,
125030ee2a98SJason J. Herne 				   sizeof(struct kvm_s390_skeys)))
125130ee2a98SJason J. Herne 			break;
125230ee2a98SJason J. Herne 		r = kvm_s390_get_skeys(kvm, &args);
125330ee2a98SJason J. Herne 		break;
125430ee2a98SJason J. Herne 	}
125530ee2a98SJason J. Herne 	case KVM_S390_SET_SKEYS: {
125630ee2a98SJason J. Herne 		struct kvm_s390_skeys args;
125730ee2a98SJason J. Herne 
125830ee2a98SJason J. Herne 		r = -EFAULT;
125930ee2a98SJason J. Herne 		if (copy_from_user(&args, argp,
126030ee2a98SJason J. Herne 				   sizeof(struct kvm_s390_skeys)))
126130ee2a98SJason J. Herne 			break;
126230ee2a98SJason J. Herne 		r = kvm_s390_set_skeys(kvm, &args);
126330ee2a98SJason J. Herne 		break;
126430ee2a98SJason J. Herne 	}
1265b0c632dbSHeiko Carstens 	default:
1266367e1319SAvi Kivity 		r = -ENOTTY;
1267b0c632dbSHeiko Carstens 	}
1268b0c632dbSHeiko Carstens 
1269b0c632dbSHeiko Carstens 	return r;
1270b0c632dbSHeiko Carstens }
1271b0c632dbSHeiko Carstens 
/*
 * kvm_s390_query_ap_config() - run PQAP(QCI) to query the adjunct
 * processor (AP/crypto) configuration.
 * @config: 128-byte buffer receiving the QCI information block.
 *
 * GR0 carries the function code 0x04000000 (QCI), GR2 the buffer
 * address; 0xb2af0000 is the PQAP opcode.  Returns the instruction's
 * condition code (0 on success).  The buffer is zeroed and cc preset
 * to 0 up front, so if PQAP raises a program check (recovered via the
 * EX_TABLE fixup) the caller sees cc 0 with an all-zero info block,
 * i.e. "no features installed".
 */
127245c9b47cSTony Krowiak static int kvm_s390_query_ap_config(u8 *config)
127345c9b47cSTony Krowiak {
127445c9b47cSTony Krowiak 	u32 fcn_code = 0x04000000UL;
127586044c8cSChristian Borntraeger 	u32 cc = 0;
127645c9b47cSTony Krowiak 
127786044c8cSChristian Borntraeger 	memset(config, 0, 128);
127845c9b47cSTony Krowiak 	asm volatile(
127945c9b47cSTony Krowiak 		"lgr 0,%1\n"
128045c9b47cSTony Krowiak 		"lgr 2,%2\n"
128145c9b47cSTony Krowiak 		".long 0xb2af0000\n"		/* PQAP(QCI) */
128286044c8cSChristian Borntraeger 		"0: ipm %0\n"
128345c9b47cSTony Krowiak 		"srl %0,28\n"
128486044c8cSChristian Borntraeger 		"1:\n"
128586044c8cSChristian Borntraeger 		EX_TABLE(0b, 1b)
128686044c8cSChristian Borntraeger 		: "+r" (cc)
128745c9b47cSTony Krowiak 		: "r" (fcn_code), "r" (config)
128845c9b47cSTony Krowiak 		: "cc", "0", "2", "memory"
128945c9b47cSTony Krowiak 	);
129045c9b47cSTony Krowiak 
129145c9b47cSTony Krowiak 	return cc;
129245c9b47cSTony Krowiak }
129345c9b47cSTony Krowiak 
/*
 * kvm_s390_apxa_installed() - test for the AP extended addressing
 * (APXA) facility.
 *
 * Only attempted when facility bit 12 is present (NOTE(review):
 * presumably the AP query facility -- confirm against the facility
 * list definitions).  On a successful PQAP(QCI), bit 0x40 of the
 * first info byte reports APXA.  Returns non-zero if APXA is
 * installed, 0 otherwise (including when the query fails, which is
 * only logged).
 */
129445c9b47cSTony Krowiak static int kvm_s390_apxa_installed(void)
129545c9b47cSTony Krowiak {
129645c9b47cSTony Krowiak 	u8 config[128];
129745c9b47cSTony Krowiak 	int cc;
129845c9b47cSTony Krowiak 
1299a6aacc3fSHeiko Carstens 	if (test_facility(12)) {
130045c9b47cSTony Krowiak 		cc = kvm_s390_query_ap_config(config);
130145c9b47cSTony Krowiak 
130245c9b47cSTony Krowiak 		if (cc)
130345c9b47cSTony Krowiak 			pr_err("PQAP(QCI) failed with cc=%d", cc);
130445c9b47cSTony Krowiak 		else
130545c9b47cSTony Krowiak 			return config[0] & 0x40;
130645c9b47cSTony Krowiak 	}
130745c9b47cSTony Krowiak 
130845c9b47cSTony Krowiak 	return 0;
130945c9b47cSTony Krowiak }
131045c9b47cSTony Krowiak 
/*
 * Set the VM's crypto-control-block designation: the CRYCB origin
 * with the format indication in the low-order bits.  Format 2 is
 * chosen when APXA is available, format 1 otherwise.
 */
131145c9b47cSTony Krowiak static void kvm_s390_set_crycb_format(struct kvm *kvm)
131245c9b47cSTony Krowiak {
131345c9b47cSTony Krowiak 	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
131445c9b47cSTony Krowiak 
131545c9b47cSTony Krowiak 	if (kvm_s390_apxa_installed())
131645c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
131745c9b47cSTony Krowiak 	else
131845c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
131945c9b47cSTony Krowiak }
132045c9b47cSTony Krowiak 
/*
 * Return the initial guest CPU id: the host cpuid with the version
 * field overridden to 0xff, reinterpreted as a raw u64.
 */
13219bb0ec09SDavid Hildenbrand static u64 kvm_s390_get_initial_cpuid(void)
13229d8d5786SMichael Mueller {
13239bb0ec09SDavid Hildenbrand 	struct cpuid cpuid;
13249bb0ec09SDavid Hildenbrand 
13259bb0ec09SDavid Hildenbrand 	get_cpu_id(&cpuid);
13269bb0ec09SDavid Hildenbrand 	cpuid.version = 0xff;
13279bb0ec09SDavid Hildenbrand 	return *((u64 *) &cpuid);
13289d8d5786SMichael Mueller }
13299d8d5786SMichael Mueller 
/*
 * Initialize the VM's crypto state: point the CRYCB at the area
 * embedded in sie_page2, choose its format, and enable AES/DEA
 * protected-key wrapping with freshly generated random wrapping key
 * masks.  Nothing is set up unless the guest has facility 76
 * (NOTE(review): the crypto facility gating CRYCB use -- confirm
 * against the facility definitions).
 */
1330c54f0d6aSDavid Hildenbrand static void kvm_s390_crypto_init(struct kvm *kvm)
13315102ee87STony Krowiak {
13329d8d5786SMichael Mueller 	if (!test_kvm_facility(kvm, 76))
1333c54f0d6aSDavid Hildenbrand 		return;
13345102ee87STony Krowiak 
1335c54f0d6aSDavid Hildenbrand 	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
133645c9b47cSTony Krowiak 	kvm_s390_set_crycb_format(kvm);
13375102ee87STony Krowiak 
1338ed6f76b4STony Krowiak 	/* Enable AES/DEA protected key functions by default */
1339ed6f76b4STony Krowiak 	kvm->arch.crypto.aes_kw = 1;
1340ed6f76b4STony Krowiak 	kvm->arch.crypto.dea_kw = 1;
1341ed6f76b4STony Krowiak 	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1342ed6f76b4STony Krowiak 			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1343ed6f76b4STony Krowiak 	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1344ed6f76b4STony Krowiak 			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
13455102ee87STony Krowiak }
13465102ee87STony Krowiak 
/*
 * Free the VM's system control area (extended or basic format, which
 * have different sizes/allocators) and clear the pointer.
 */
13467d43bafcSEugene (jno) Dvurechenski static void sca_dispose(struct kvm *kvm)
13477d43bafcSEugene (jno) Dvurechenski {
13487d43bafcSEugene (jno) Dvurechenski 	if (kvm->arch.use_esca)
13495e044315SEugene (jno) Dvurechenski 		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
13507d43bafcSEugene (jno) Dvurechenski 	else
13517d43bafcSEugene (jno) Dvurechenski 		free_page((unsigned long)(kvm->arch.sca));
13527d43bafcSEugene (jno) Dvurechenski 	kvm->arch.sca = NULL;
13537d43bafcSEugene (jno) Dvurechenski }
13557d43bafcSEugene (jno) Dvurechenski 
/*
 * kvm_arch_init_vm() - architecture part of VM creation.
 * @kvm:  the VM being created.
 * @type: creation flags; only KVM_VM_S390_UCONTROL is meaningful, and
 *        only with CONFIG_KVM_S390_UCONTROL and CAP_SYS_ADMIN.
 *
 * Allocates the basic SCA, the debug feature, sie_page2 (facility
 * list + CRYCB), initializes the facility mask/list, crypto and
 * floating-interrupt state, and -- for non-ucontrol VMs -- the guest
 * address space (gmap) with its memory limit.  Returns 0 on success
 * or a negative error; on failure everything allocated so far is
 * released via the out_err path.
 */
1356e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
1357b0c632dbSHeiko Carstens {
135876a6dd72SDavid Hildenbrand 	gfp_t alloc_flags = GFP_KERNEL;
13599d8d5786SMichael Mueller 	int i, rc;
1360b0c632dbSHeiko Carstens 	char debug_name[16];
1361f6c137ffSChristian Borntraeger 	static unsigned long sca_offset;
1362b0c632dbSHeiko Carstens 
1363e08b9637SCarsten Otte 	rc = -EINVAL;
1364e08b9637SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
1365e08b9637SCarsten Otte 	if (type & ~KVM_VM_S390_UCONTROL)
1366e08b9637SCarsten Otte 		goto out_err;
1367e08b9637SCarsten Otte 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
1368e08b9637SCarsten Otte 		goto out_err;
1369e08b9637SCarsten Otte #else
1370e08b9637SCarsten Otte 	if (type)
1371e08b9637SCarsten Otte 		goto out_err;
1372e08b9637SCarsten Otte #endif
1373e08b9637SCarsten Otte 
1374b0c632dbSHeiko Carstens 	rc = s390_enable_sie();
1375b0c632dbSHeiko Carstens 	if (rc)
1376d89f5effSJan Kiszka 		goto out_err;
1377b0c632dbSHeiko Carstens 
1378b290411aSCarsten Otte 	rc = -ENOMEM;
1379b290411aSCarsten Otte 
13807d0a5e62SJanosch Frank 	ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);
13817d0a5e62SJanosch Frank 
13827d43bafcSEugene (jno) Dvurechenski 	kvm->arch.use_esca = 0; /* start with basic SCA */
	/* Without the 64-bit-SCAO facility the SCA must live below 2 GB. */
138376a6dd72SDavid Hildenbrand 	if (!sclp.has_64bscao)
138476a6dd72SDavid Hildenbrand 		alloc_flags |= GFP_DMA;
13855e044315SEugene (jno) Dvurechenski 	rwlock_init(&kvm->arch.sca_lock);
138676a6dd72SDavid Hildenbrand 	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
1387b0c632dbSHeiko Carstens 	if (!kvm->arch.sca)
1388d89f5effSJan Kiszka 		goto out_err;
	/*
	 * Stagger the basic SCAs of successive VMs within their page in
	 * 16-byte steps (NOTE(review): presumably to spread hardware
	 * accesses of different VMs across cache lines -- confirm).
	 */
1389f6c137ffSChristian Borntraeger 	spin_lock(&kvm_lock);
1390c5c2c393SDavid Hildenbrand 	sca_offset += 16;
1391bc784cceSEugene (jno) Dvurechenski 	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
1392c5c2c393SDavid Hildenbrand 		sca_offset = 0;
1393bc784cceSEugene (jno) Dvurechenski 	kvm->arch.sca = (struct bsca_block *)
1394bc784cceSEugene (jno) Dvurechenski 			((char *) kvm->arch.sca + sca_offset);
1395f6c137ffSChristian Borntraeger 	spin_unlock(&kvm_lock);
1396b0c632dbSHeiko Carstens 
1397b0c632dbSHeiko Carstens 	sprintf(debug_name, "kvm-%u", current->pid);
1398b0c632dbSHeiko Carstens 
13991cb9cf72SChristian Borntraeger 	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
1400b0c632dbSHeiko Carstens 	if (!kvm->arch.dbf)
140140f5b735SDominik Dingel 		goto out_err;
1402b0c632dbSHeiko Carstens 
1403c54f0d6aSDavid Hildenbrand 	kvm->arch.sie_page2 =
1404c54f0d6aSDavid Hildenbrand 	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1405c54f0d6aSDavid Hildenbrand 	if (!kvm->arch.sie_page2)
140640f5b735SDominik Dingel 		goto out_err;
14079d8d5786SMichael Mueller 
1408fb5bf93fSMichael Mueller 	/* Populate the facility mask initially. */
1409c54f0d6aSDavid Hildenbrand 	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
141094422ee8SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
14109d8d5786SMichael Mueller 	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
14119d8d5786SMichael Mueller 		if (i < kvm_s390_fac_list_mask_size())
1412c54f0d6aSDavid Hildenbrand 			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
14139d8d5786SMichael Mueller 		else
1414c54f0d6aSDavid Hildenbrand 			kvm->arch.model.fac_mask[i] = 0UL;
14159d8d5786SMichael Mueller 	}
14169d8d5786SMichael Mueller 
1417981467c9SMichael Mueller 	/* Populate the facility list initially. */
1418c54f0d6aSDavid Hildenbrand 	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
1419c54f0d6aSDavid Hildenbrand 	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
1420981467c9SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
1421981467c9SMichael Mueller 
	/*
	 * Always offer facility 74 (NOTE(review): appears to be STHYI,
	 * matching the sthyi_limit rate limit above -- confirm).
	 */
142295ca2cb5SJanosch Frank 	set_kvm_facility(kvm->arch.model.fac_mask, 74);
142395ca2cb5SJanosch Frank 	set_kvm_facility(kvm->arch.model.fac_list, 74);
142495ca2cb5SJanosch Frank 
14259bb0ec09SDavid Hildenbrand 	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
142637c5f6c8SDavid Hildenbrand 	kvm->arch.model.ibc = sclp.ibc & 0x0fff;
14279d8d5786SMichael Mueller 
1428c54f0d6aSDavid Hildenbrand 	kvm_s390_crypto_init(kvm);
14295102ee87STony Krowiak 
1430ba5c1e9bSCarsten Otte 	spin_lock_init(&kvm->arch.float_int.lock);
14316d3da241SJens Freimann 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
14326d3da241SJens Freimann 		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
14338a242234SHeiko Carstens 	init_waitqueue_head(&kvm->arch.ipte_wq);
1434a6b7e459SThomas Huth 	mutex_init(&kvm->arch.ipte_mutex);
1435ba5c1e9bSCarsten Otte 
1436b0c632dbSHeiko Carstens 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
143778f26131SChristian Borntraeger 	VM_EVENT(kvm, 3, "vm created with type %lu", type);
1438b0c632dbSHeiko Carstens 
	/* ucontrol VMs get per-vcpu gmaps later instead of a VM-wide one. */
1440e08b9637SCarsten Otte 	if (type & KVM_VM_S390_UCONTROL) {
1441e08b9637SCarsten Otte 		kvm->arch.gmap = NULL;
1442a3a92c31SDominik Dingel 		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
1443e08b9637SCarsten Otte 	} else {
		/* Cap guest memory by the hardware addressing limit. */
144432e6b236SGuenther Hutzl 		if (sclp.hamax == U64_MAX)
1445a3a92c31SDominik Dingel 			kvm->arch.mem_limit = TASK_MAX_SIZE;
144632e6b236SGuenther Hutzl 		else
144732e6b236SGuenther Hutzl 			kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
144832e6b236SGuenther Hutzl 						    sclp.hamax + 1);
14496ea427bbSMartin Schwidefsky 		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
1450598841caSCarsten Otte 		if (!kvm->arch.gmap)
145140f5b735SDominik Dingel 			goto out_err;
14522c70fe44SChristian Borntraeger 		kvm->arch.gmap->private = kvm;
145324eb3a82SDominik Dingel 		kvm->arch.gmap->pfault_enabled = 0;
1454e08b9637SCarsten Otte 	}
1455fa6b7fe9SCornelia Huck 
1456fa6b7fe9SCornelia Huck 	kvm->arch.css_support = 0;
145784223598SCornelia Huck 	kvm->arch.use_irqchip = 0;
145872f25020SJason J. Herne 	kvm->arch.epoch = 0;
1459fa6b7fe9SCornelia Huck 
14608ad35755SDavid Hildenbrand 	spin_lock_init(&kvm->arch.start_stop_lock);
1461a3508fbeSDavid Hildenbrand 	kvm_s390_vsie_init(kvm);
14628335713aSChristian Borntraeger 	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
14638ad35755SDavid Hildenbrand 
1464d89f5effSJan Kiszka 	return 0;
1465d89f5effSJan Kiszka out_err:
1466c54f0d6aSDavid Hildenbrand 	free_page((unsigned long)kvm->arch.sie_page2);
146740f5b735SDominik Dingel 	debug_unregister(kvm->arch.dbf);
14687d43bafcSEugene (jno) Dvurechenski 	sca_dispose(kvm);
146978f26131SChristian Borntraeger 	KVM_EVENT(3, "creation of vm failed: %d", rc);
1470d89f5effSJan Kiszka 	return rc;
1471b0c632dbSHeiko Carstens }
1472b0c632dbSHeiko Carstens 
/*
 * Architecture part of vcpu teardown: clear pending local interrupts
 * and async page faults, detach from the SCA (non-ucontrol), drop the
 * per-vcpu gmap (ucontrol), release CMMA state and the SIE control
 * block, then free the vcpu structure itself.
 */
1473d329c035SChristian Borntraeger void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1474d329c035SChristian Borntraeger {
1475d329c035SChristian Borntraeger 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
1476ade38c31SCornelia Huck 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
147767335e63SChristian Borntraeger 	kvm_s390_clear_local_irqs(vcpu);
14783c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
1479bc784cceSEugene (jno) Dvurechenski 	if (!kvm_is_ucontrol(vcpu->kvm))
1480a6e2f683SEugene (jno) Dvurechenski 		sca_del_vcpu(vcpu);
148127e0393fSCarsten Otte 
	/* ucontrol vcpus own their gmap (see __kvm_ucontrol_vcpu_init). */
148227e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm))
14836ea427bbSMartin Schwidefsky 		gmap_remove(vcpu->arch.gmap);
148427e0393fSCarsten Otte 
1485e6db1d61SDominik Dingel 	if (vcpu->kvm->arch.use_cmma)
1486b31605c1SDominik Dingel 		kvm_s390_vcpu_unsetup_cmma(vcpu);
1487d329c035SChristian Borntraeger 	free_page((unsigned long)(vcpu->arch.sie_block));
1488b31288faSKonstantin Weitz 
14896692cef3SChristian Borntraeger 	kvm_vcpu_uninit(vcpu);
1490b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
1491d329c035SChristian Borntraeger }
1492d329c035SChristian Borntraeger 
/*
 * Destroy every vcpu of the VM, then clear the vcpu array and online
 * count under kvm->lock.  Only used from kvm_arch_destroy_vm(), so no
 * new vcpus can appear concurrently.
 */
1493d329c035SChristian Borntraeger static void kvm_free_vcpus(struct kvm *kvm)
1494d329c035SChristian Borntraeger {
1495d329c035SChristian Borntraeger 	unsigned int i;
1496988a2caeSGleb Natapov 	struct kvm_vcpu *vcpu;
1497d329c035SChristian Borntraeger 
1498988a2caeSGleb Natapov 	kvm_for_each_vcpu(i, vcpu, kvm)
1499988a2caeSGleb Natapov 		kvm_arch_vcpu_destroy(vcpu);
1500988a2caeSGleb Natapov 
1501988a2caeSGleb Natapov 	mutex_lock(&kvm->lock);
1502988a2caeSGleb Natapov 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
1503d329c035SChristian Borntraeger 		kvm->vcpus[i] = NULL;
1504988a2caeSGleb Natapov 
1505988a2caeSGleb Natapov 	atomic_set(&kvm->online_vcpus, 0);
1506988a2caeSGleb Natapov 	mutex_unlock(&kvm->lock);
1507d329c035SChristian Borntraeger }
1508d329c035SChristian Borntraeger 
/*
 * Architecture part of VM destruction: free the vcpus and SCA, the
 * debug feature and sie_page2, the VM-wide gmap (non-ucontrol only),
 * the irqfd adapters, floating interrupts and vsie state.
 */
1509b0c632dbSHeiko Carstens void kvm_arch_destroy_vm(struct kvm *kvm)
1510b0c632dbSHeiko Carstens {
1511d329c035SChristian Borntraeger 	kvm_free_vcpus(kvm);
15127d43bafcSEugene (jno) Dvurechenski 	sca_dispose(kvm);
1513d329c035SChristian Borntraeger 	debug_unregister(kvm->arch.dbf);
1514c54f0d6aSDavid Hildenbrand 	free_page((unsigned long)kvm->arch.sie_page2);
151527e0393fSCarsten Otte 	if (!kvm_is_ucontrol(kvm))
15166ea427bbSMartin Schwidefsky 		gmap_remove(kvm->arch.gmap);
1517841b91c5SCornelia Huck 	kvm_s390_destroy_adapters(kvm);
151867335e63SChristian Borntraeger 	kvm_s390_clear_float_irqs(kvm);
1519a3508fbeSDavid Hildenbrand 	kvm_s390_vsie_destroy(kvm);
15208335713aSChristian Borntraeger 	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
1521b0c632dbSHeiko Carstens }
1522b0c632dbSHeiko Carstens 
1523b0c632dbSHeiko Carstens /* Section: vcpu related */
/*
 * ucontrol VMs have no VM-wide gmap; give each vcpu its own guest
 * address space spanning the whole range (limit -1UL).  Returns 0 or
 * -ENOMEM.
 */
1524dafd032aSDominik Dingel static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1525b0c632dbSHeiko Carstens {
15266ea427bbSMartin Schwidefsky 	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
152727e0393fSCarsten Otte 	if (!vcpu->arch.gmap)
152827e0393fSCarsten Otte 		return -ENOMEM;
15292c70fe44SChristian Borntraeger 	vcpu->arch.gmap->private = vcpu->kvm;
1530dafd032aSDominik Dingel 
153127e0393fSCarsten Otte 	return 0;
153227e0393fSCarsten Otte }
153327e0393fSCarsten Otte 
/*
 * Remove the vcpu from the VM's SCA: clear its MCN bit and its
 * SIE-block pointer (sda), in whichever SCA format is in use.
 * sca_lock is taken for reading -- per-vcpu slot updates may run
 * concurrently; only the basic-to-extended switch takes it for
 * writing (see sca_switch_to_extended()).
 */
1534a6e2f683SEugene (jno) Dvurechenski static void sca_del_vcpu(struct kvm_vcpu *vcpu)
1535a6e2f683SEugene (jno) Dvurechenski {
15365e044315SEugene (jno) Dvurechenski 	read_lock(&vcpu->kvm->arch.sca_lock);
15377d43bafcSEugene (jno) Dvurechenski 	if (vcpu->kvm->arch.use_esca) {
15387d43bafcSEugene (jno) Dvurechenski 		struct esca_block *sca = vcpu->kvm->arch.sca;
15397d43bafcSEugene (jno) Dvurechenski 
15407d43bafcSEugene (jno) Dvurechenski 		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
15417d43bafcSEugene (jno) Dvurechenski 		sca->cpu[vcpu->vcpu_id].sda = 0;
15427d43bafcSEugene (jno) Dvurechenski 	} else {
1543bc784cceSEugene (jno) Dvurechenski 		struct bsca_block *sca = vcpu->kvm->arch.sca;
1544a6e2f683SEugene (jno) Dvurechenski 
1545a6e2f683SEugene (jno) Dvurechenski 		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
1546a6e2f683SEugene (jno) Dvurechenski 		sca->cpu[vcpu->vcpu_id].sda = 0;
1547a6e2f683SEugene (jno) Dvurechenski 	}
15485e044315SEugene (jno) Dvurechenski 	read_unlock(&vcpu->kvm->arch.sca_lock);
15497d43bafcSEugene (jno) Dvurechenski }
1550a6e2f683SEugene (jno) Dvurechenski 
/*
 * Enter the vcpu into the VM's SCA: store its SIE-block address in
 * the per-vcpu slot (sda), point the SIE block back at the SCA
 * (scaoh/scaol) and set the vcpu's MCN bit.  In extended-SCA mode the
 * SIE block additionally gets ecb2 bit 0x04 set (NOTE(review): the
 * ESCA-mode control -- confirm against the SIE block layout).  Taken
 * under the sca_lock read side, like sca_del_vcpu().
 */
1551eaa78f34SDavid Hildenbrand static void sca_add_vcpu(struct kvm_vcpu *vcpu)
1552a6e2f683SEugene (jno) Dvurechenski {
1553eaa78f34SDavid Hildenbrand 	read_lock(&vcpu->kvm->arch.sca_lock);
1554eaa78f34SDavid Hildenbrand 	if (vcpu->kvm->arch.use_esca) {
1555eaa78f34SDavid Hildenbrand 		struct esca_block *sca = vcpu->kvm->arch.sca;
15567d43bafcSEugene (jno) Dvurechenski 
1557eaa78f34SDavid Hildenbrand 		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
15587d43bafcSEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
15597d43bafcSEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
156025508824SDavid Hildenbrand 		vcpu->arch.sie_block->ecb2 |= 0x04U;
1561eaa78f34SDavid Hildenbrand 		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
15627d43bafcSEugene (jno) Dvurechenski 	} else {
1563eaa78f34SDavid Hildenbrand 		struct bsca_block *sca = vcpu->kvm->arch.sca;
1564a6e2f683SEugene (jno) Dvurechenski 
1565eaa78f34SDavid Hildenbrand 		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
1566a6e2f683SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
1567a6e2f683SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
1568eaa78f34SDavid Hildenbrand 		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
1569a6e2f683SEugene (jno) Dvurechenski 	}
1570eaa78f34SDavid Hildenbrand 	read_unlock(&vcpu->kvm->arch.sca_lock);
15715e044315SEugene (jno) Dvurechenski }
15725e044315SEugene (jno) Dvurechenski 
15735e044315SEugene (jno) Dvurechenski /* Basic SCA to Extended SCA data copy routines */
/*
 * Copy one vcpu slot from basic to extended SCA format.  The
 * sigp_ctrl layouts differ between the formats, so the fields are
 * copied individually rather than as a whole.
 */
15745e044315SEugene (jno) Dvurechenski static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
15755e044315SEugene (jno) Dvurechenski {
15765e044315SEugene (jno) Dvurechenski 	d->sda = s->sda;
15775e044315SEugene (jno) Dvurechenski 	d->sigp_ctrl.c = s->sigp_ctrl.c;
15785e044315SEugene (jno) Dvurechenski 	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
15795e044315SEugene (jno) Dvurechenski }
15805e044315SEugene (jno) Dvurechenski 
/*
 * Copy the shared state of a basic SCA into a freshly allocated
 * extended SCA: the ipte control word, the (single-word) MCN bitmap
 * and every basic-format vcpu slot.
 */
15815e044315SEugene (jno) Dvurechenski static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
15825e044315SEugene (jno) Dvurechenski {
15835e044315SEugene (jno) Dvurechenski 	int i;
15845e044315SEugene (jno) Dvurechenski 
15855e044315SEugene (jno) Dvurechenski 	d->ipte_control = s->ipte_control;
15865e044315SEugene (jno) Dvurechenski 	d->mcn[0] = s->mcn;
15875e044315SEugene (jno) Dvurechenski 	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
15885e044315SEugene (jno) Dvurechenski 		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
15895e044315SEugene (jno) Dvurechenski }
15905e044315SEugene (jno) Dvurechenski 
/*
 * Replace the VM's basic SCA by an extended one, migrating all state.
 *
 * All vcpus are blocked and sca_lock is write-held while the SCA
 * pointer and every SIE block (scaoh/scaol, ecb2 ESCA bit) are
 * switched over, so no vcpu can run on -- or update a slot of -- the
 * old SCA during the exchange.  Returns 0 on success, -ENOMEM if the
 * new SCA cannot be allocated (the old one then stays in place).
 */
15915e044315SEugene (jno) Dvurechenski static int sca_switch_to_extended(struct kvm *kvm)
15925e044315SEugene (jno) Dvurechenski {
15935e044315SEugene (jno) Dvurechenski 	struct bsca_block *old_sca = kvm->arch.sca;
15945e044315SEugene (jno) Dvurechenski 	struct esca_block *new_sca;
15955e044315SEugene (jno) Dvurechenski 	struct kvm_vcpu *vcpu;
15965e044315SEugene (jno) Dvurechenski 	unsigned int vcpu_idx;
15975e044315SEugene (jno) Dvurechenski 	u32 scaol, scaoh;
15985e044315SEugene (jno) Dvurechenski 
15995e044315SEugene (jno) Dvurechenski 	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
16005e044315SEugene (jno) Dvurechenski 	if (!new_sca)
16015e044315SEugene (jno) Dvurechenski 		return -ENOMEM;
16025e044315SEugene (jno) Dvurechenski 
16035e044315SEugene (jno) Dvurechenski 	scaoh = (u32)((u64)(new_sca) >> 32);
16045e044315SEugene (jno) Dvurechenski 	scaol = (u32)(u64)(new_sca) & ~0x3fU;
16055e044315SEugene (jno) Dvurechenski 
16065e044315SEugene (jno) Dvurechenski 	kvm_s390_vcpu_block_all(kvm);
16075e044315SEugene (jno) Dvurechenski 	write_lock(&kvm->arch.sca_lock);
16085e044315SEugene (jno) Dvurechenski 
16095e044315SEugene (jno) Dvurechenski 	sca_copy_b_to_e(new_sca, old_sca);
16105e044315SEugene (jno) Dvurechenski 
16115e044315SEugene (jno) Dvurechenski 	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
16125e044315SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaoh = scaoh;
16135e044315SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaol = scaol;
16145e044315SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->ecb2 |= 0x04U;
16155e044315SEugene (jno) Dvurechenski 	}
16165e044315SEugene (jno) Dvurechenski 	kvm->arch.sca = new_sca;
16175e044315SEugene (jno) Dvurechenski 	kvm->arch.use_esca = 1;
16185e044315SEugene (jno) Dvurechenski 
16195e044315SEugene (jno) Dvurechenski 	write_unlock(&kvm->arch.sca_lock);
16205e044315SEugene (jno) Dvurechenski 	kvm_s390_vcpu_unblock_all(kvm);
16215e044315SEugene (jno) Dvurechenski 
16225e044315SEugene (jno) Dvurechenski 	free_page((unsigned long)old_sca);
16235e044315SEugene (jno) Dvurechenski 
16248335713aSChristian Borntraeger 	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
16258335713aSChristian Borntraeger 		 old_sca, kvm->arch.sca);
16265e044315SEugene (jno) Dvurechenski 	return 0;
16277d43bafcSEugene (jno) Dvurechenski }
1628a6e2f683SEugene (jno) Dvurechenski 
/*
 * Check whether a vcpu with the given id fits into the VM's SCA,
 * switching to the extended SCA on demand.  Ids below the basic-SCA
 * slot count always fit; larger ids additionally require ESCA and
 * 64-bit-SCAO support in the machine.  Returns true/non-zero if the
 * id can be used (declared int but used as a boolean).
 */
1629a6e2f683SEugene (jno) Dvurechenski static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
1630a6e2f683SEugene (jno) Dvurechenski {
16315e044315SEugene (jno) Dvurechenski 	int rc;
16325e044315SEugene (jno) Dvurechenski 
16335e044315SEugene (jno) Dvurechenski 	if (id < KVM_S390_BSCA_CPU_SLOTS)
16345e044315SEugene (jno) Dvurechenski 		return true;
163576a6dd72SDavid Hildenbrand 	if (!sclp.has_esca || !sclp.has_64bscao)
16365e044315SEugene (jno) Dvurechenski 		return false;
16375e044315SEugene (jno) Dvurechenski 
	/* kvm->lock serializes concurrent attempts to switch the SCA. */
16385e044315SEugene (jno) Dvurechenski 	mutex_lock(&kvm->lock);
16395e044315SEugene (jno) Dvurechenski 	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
16405e044315SEugene (jno) Dvurechenski 	mutex_unlock(&kvm->lock);
16415e044315SEugene (jno) Dvurechenski 
16425e044315SEugene (jno) Dvurechenski 	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
1643a6e2f683SEugene (jno) Dvurechenski }
1644a6e2f683SEugene (jno) Dvurechenski 
/*
 * Architecture part of vcpu setup: invalidate the pfault token,
 * announce which register sets are synced via kvm_run (prefix, GPRs,
 * ACRS, CRS, arch0, pfault; RICCB with facility 64; VRS or FPRS
 * depending on vector support), and create the per-vcpu gmap for
 * ucontrol VMs.  Returns 0 or -ENOMEM from the gmap creation.
 */
1645dafd032aSDominik Dingel int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1646dafd032aSDominik Dingel {
1647dafd032aSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1648dafd032aSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
164959674c1aSChristian Borntraeger 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
165059674c1aSChristian Borntraeger 				    KVM_SYNC_GPRS |
16519eed0735SChristian Borntraeger 				    KVM_SYNC_ACRS |
1652b028ee3eSDavid Hildenbrand 				    KVM_SYNC_CRS |
1653b028ee3eSDavid Hildenbrand 				    KVM_SYNC_ARCH0 |
1654b028ee3eSDavid Hildenbrand 				    KVM_SYNC_PFAULT;
1655c6e5f166SFan Zhang 	if (test_kvm_facility(vcpu->kvm, 64))
1656c6e5f166SFan Zhang 		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
1657f6aa6dc4SDavid Hildenbrand 	/* fprs can be synchronized via vrs, even if the guest has no vx. With
1658f6aa6dc4SDavid Hildenbrand 	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
1659f6aa6dc4SDavid Hildenbrand 	 */
1660f6aa6dc4SDavid Hildenbrand 	if (MACHINE_HAS_VX)
166168c55750SEric Farman 		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
16626fd8e67dSDavid Hildenbrand 	else
16636fd8e67dSDavid Hildenbrand 		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
1664dafd032aSDominik Dingel 
1665dafd032aSDominik Dingel 	if (kvm_is_ucontrol(vcpu->kvm))
1666dafd032aSDominik Dingel 		return __kvm_ucontrol_vcpu_init(vcpu);
1667dafd032aSDominik Dingel 
1668b0c632dbSHeiko Carstens 	return 0;
1669b0c632dbSHeiko Carstens }
1670b0c632dbSHeiko Carstens 
1671db0758b2SDavid Hildenbrand /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
/* Record the TOD clock at which CPU-timer accounting (re)starts. */
1672db0758b2SDavid Hildenbrand static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1673db0758b2SDavid Hildenbrand {
1674db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	/* odd seqcount value tells concurrent readers to retry */
16759c23a131SDavid Hildenbrand 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
1676db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_start = get_tod_clock_fast();
16779c23a131SDavid Hildenbrand 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
1678db0758b2SDavid Hildenbrand }
1679db0758b2SDavid Hildenbrand 
1680db0758b2SDavid Hildenbrand /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
/* Fold the elapsed accounting window into the SIE cpu timer and stop. */
1681db0758b2SDavid Hildenbrand static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1682db0758b2SDavid Hildenbrand {
1683db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
16849c23a131SDavid Hildenbrand 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	/* the guest CPU timer counts down, so elapsed time is subtracted */
1685db0758b2SDavid Hildenbrand 	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
1686db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_start = 0;
16879c23a131SDavid Hildenbrand 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
1688db0758b2SDavid Hildenbrand }
1689db0758b2SDavid Hildenbrand 
1690db0758b2SDavid Hildenbrand /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
/* Mark accounting enabled and start the first window. */
1691db0758b2SDavid Hildenbrand static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1692db0758b2SDavid Hildenbrand {
1693db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
1694db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_enabled = true;
1695db0758b2SDavid Hildenbrand 	__start_cpu_timer_accounting(vcpu);
1696db0758b2SDavid Hildenbrand }
1697db0758b2SDavid Hildenbrand 
1698db0758b2SDavid Hildenbrand /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
/* Stop the current window and mark accounting disabled. */
1699db0758b2SDavid Hildenbrand static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1700db0758b2SDavid Hildenbrand {
1701db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
1702db0758b2SDavid Hildenbrand 	__stop_cpu_timer_accounting(vcpu);
1703db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_enabled = false;
1704db0758b2SDavid Hildenbrand }
1705db0758b2SDavid Hildenbrand 
/* Preemption-safe wrapper around __enable_cpu_timer_accounting(). */
1706db0758b2SDavid Hildenbrand static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1707db0758b2SDavid Hildenbrand {
1708db0758b2SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
1709db0758b2SDavid Hildenbrand 	__enable_cpu_timer_accounting(vcpu);
1710db0758b2SDavid Hildenbrand 	preempt_enable();
1711db0758b2SDavid Hildenbrand }
1712db0758b2SDavid Hildenbrand 
/* Preemption-safe wrapper around __disable_cpu_timer_accounting(). */
1713db0758b2SDavid Hildenbrand static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1714db0758b2SDavid Hildenbrand {
1715db0758b2SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
1716db0758b2SDavid Hildenbrand 	__disable_cpu_timer_accounting(vcpu);
1717db0758b2SDavid Hildenbrand 	preempt_enable();
1718db0758b2SDavid Hildenbrand }
1719db0758b2SDavid Hildenbrand 
17204287f247SDavid Hildenbrand /* set the cpu timer - may only be called from the VCPU thread itself */
/*
 * Replace the guest CPU timer value.  If accounting is active the
 * current window is restarted (cputm_start reset) so that time
 * elapsed before this call is not charged against the new value.
 */
17214287f247SDavid Hildenbrand void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
17224287f247SDavid Hildenbrand {
1723db0758b2SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
17249c23a131SDavid Hildenbrand 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
1725db0758b2SDavid Hildenbrand 	if (vcpu->arch.cputm_enabled)
1726db0758b2SDavid Hildenbrand 		vcpu->arch.cputm_start = get_tod_clock_fast();
17274287f247SDavid Hildenbrand 	vcpu->arch.sie_block->cputm = cputm;
17289c23a131SDavid Hildenbrand 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
1729db0758b2SDavid Hildenbrand 	preempt_enable();
17304287f247SDavid Hildenbrand }
17314287f247SDavid Hildenbrand 
1732db0758b2SDavid Hildenbrand /* update and get the cpu timer - can also be called from other VCPU threads */
/*
 * Returns the guest CPU timer, corrected by the time the current
 * accounting window has already been running.  Readers retry while a
 * writer holds the seqcount; masking with `seq & ~1` forces a retry
 * for a read that started during an odd (in-progress) sequence.
 */
17334287f247SDavid Hildenbrand __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
17344287f247SDavid Hildenbrand {
17359c23a131SDavid Hildenbrand 	unsigned int seq;
1736db0758b2SDavid Hildenbrand 	__u64 value;
1737db0758b2SDavid Hildenbrand 
1738db0758b2SDavid Hildenbrand 	if (unlikely(!vcpu->arch.cputm_enabled))
17394287f247SDavid Hildenbrand 		return vcpu->arch.sie_block->cputm;
1740db0758b2SDavid Hildenbrand 
17419c23a131SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
17429c23a131SDavid Hildenbrand 	do {
17439c23a131SDavid Hildenbrand 		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
17449c23a131SDavid Hildenbrand 		/*
17459c23a131SDavid Hildenbrand 		 * If the writer would ever execute a read in the critical
17469c23a131SDavid Hildenbrand 		 * section, e.g. in irq context, we have a deadlock.
17479c23a131SDavid Hildenbrand 		 */
17489c23a131SDavid Hildenbrand 		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
1749db0758b2SDavid Hildenbrand 		value = vcpu->arch.sie_block->cputm;
17509c23a131SDavid Hildenbrand 		/* if cputm_start is 0, accounting is being started/stopped */
17519c23a131SDavid Hildenbrand 		if (likely(vcpu->arch.cputm_start))
1752db0758b2SDavid Hildenbrand 			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
17539c23a131SDavid Hildenbrand 	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
17549c23a131SDavid Hildenbrand 	preempt_enable();
1755db0758b2SDavid Hildenbrand 	return value;
17564287f247SDavid Hildenbrand }
17574287f247SDavid Hildenbrand 
/*
 * Prepare this physical CPU to run @vcpu: stash host FPU/access-register
 * state, install the guest's register context, enable the guest address
 * space (gmap) and resume CPU timer accounting.  Mirrored by
 * kvm_arch_vcpu_put().
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* Save host register state */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;

	/* point the lazy-FPU machinery at the guest's register save area */
	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;

	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.enabled_gmap);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	/* publish the CPU last, after accounting has been (re)started */
	vcpu->cpu = cpu;
}
1782b0c632dbSHeiko Carstens 
/*
 * Tear down what kvm_arch_vcpu_load() set up: stop timer accounting,
 * disable the guest gmap, save the guest register state and restore the
 * host FPU/access-register context.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	/* clear vcpu->cpu first; readers use it to detect a running VCPU */
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.enabled_gmap = gmap_get_enabled();
	gmap_disable(vcpu->arch.enabled_gmap);

	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;

	/* Restore host register state */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;

	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
}
1803b0c632dbSHeiko Carstens 
/*
 * Perform the architectural initial CPU reset on @vcpu: zero PSW, prefix,
 * CPU timer, clock comparator and control registers, reset lazy FPU state
 * and clear pending async page faults and local interrupts.
 */
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* architected reset values for CR0 and CR14 */
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	/* only stop the VCPU if user space does not control the state itself */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}
1827b0c632dbSHeiko Carstens 
/*
 * Late VCPU creation hook: inherit the VM-wide TOD epoch, hook the VCPU
 * into the SCA (unless running in ucontrol mode) and prime the gmap that
 * the first kvm_arch_vcpu_load() will enable.
 */
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	/* preemption off while copying the epoch to keep it consistent */
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
	/* make vcpu_load load the right gmap on the first trigger */
	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
}
184242897d86SMarcelo Tosatti 
/*
 * Program the per-VCPU crypto controls from the VM-wide crypto settings.
 * Requires facility 76 (MSA extension 3 / crypto wrapping key support);
 * otherwise the SIE block is left untouched.
 */
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	/* clear both wrapping-key bits, then set the ones the VM enabled */
	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}
18575102ee87STony Krowiak 
/* Free the collaborative-memory-management (CMMA) CBRL origin page. */
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}
1863b31605c1SDominik Dingel 
/*
 * Allocate the CBRL origin page and enable CMMA in the SIE block.
 * Returns 0 on success, -ENOMEM if the page allocation fails.
 */
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	/* ecb2 bits: 0x80 enables CMMA; 0x08 is cleared here — the PFMF
	 * interpretation bit set in kvm_arch_vcpu_setup — TODO confirm */
	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}
1874b31605c1SDominik Dingel 
/*
 * Copy the VM-wide CPU model (IBC value and, with facility 7, the
 * facility list pointer) into the VCPU's SIE block.
 */
static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.sie_block->ibc = model->ibc;
	if (test_kvm_facility(vcpu->kvm, 7))
		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
}
188391520f1aSMichael Mueller 
/*
 * Configure the SIE control block for a freshly created VCPU: base CPU
 * state flags, interpretation-control bits gated on machine facilities
 * and SCLP capabilities, CMMA, the clock-comparator timer and crypto.
 * Returns 0 on success or a negative error from CMMA setup.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	/* VCPU starts in z/Architecture mode, stopped */
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	/* guest-external-detection, preferring the facility-78 variant */
	if (test_kvm_facility(vcpu->kvm, 78))
		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
	else if (test_kvm_facility(vcpu->kvm, 8))
		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

	kvm_s390_vcpu_setup_model(vcpu);

	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
	if (MACHINE_HAS_ESOP)
		vcpu->arch.sie_block->ecb |= 0x02;
	if (test_kvm_facility(vcpu->kvm, 9))
		vcpu->arch.sie_block->ecb |= 0x04;
	/* facility 73: ecb bit 0x10 — presumably transactional execution;
	 * verify against the SIE documentation */
	if (test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
		vcpu->arch.sie_block->ecb2 |= 0x08;
	/* base ECA value; individual bits below depend on SCLP capabilities */
	vcpu->arch.sie_block->eca = 0x1002000U;
	if (sclp.has_cei)
		vcpu->arch.sie_block->eca |= 0x80000000U;
	if (sclp.has_ib)
		vcpu->arch.sie_block->eca |= 0x40000000U;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= 1;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= 0x10000000U;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->arch.sie_block->ecb3 |= 0x01;
	/* facility 129 (vector facility): enable vector interpretation */
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= 0x00020000;
		vcpu->arch.sie_block->ecd |= 0x20000000;
	}
	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
	/* intercept storage-key instructions until keys are actually used */
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
	if (test_kvm_facility(vcpu->kvm, 74))
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	/* timer backing the guest's clock comparator wakeups */
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}
1941b0c632dbSHeiko Carstens 
/*
 * Allocate and initialize a new VCPU with the given id.  Allocates the
 * vcpu structure and its SIE page, wires up local interrupt state and
 * the cputm seqcount, and registers with common KVM code.  On failure
 * all partially acquired resources are released via the goto chain.
 * Returns the vcpu or an ERR_PTR().
 */
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	/* id must fit into the SCA unless this is a ucontrol VM */
	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	/* the real guest size will always be smaller than msl */
	vcpu->arch.sie_block->mso = 0;
	vcpu->arch.sie_block->msl = sclp.hamax;

	vcpu->arch.sie_block->icpua = id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	seqcount_init(&vcpu->arch.cputm_seqcount);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}
1991b0c632dbSHeiko Carstens 
/* A VCPU is runnable iff it has a deliverable interrupt pending. */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}
1996b0c632dbSHeiko Carstens 
/* Block further SIE entries of @vcpu and kick it out of SIE. */
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}
200249b99e1eSChristian Borntraeger 
/* Allow SIE entries of @vcpu again (counterpart of kvm_s390_vcpu_block). */
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
200749b99e1eSChristian Borntraeger 
/* Mark a request as pending in the SIE block and force a SIE exit. */
static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}
20138e236546SChristian Borntraeger 
/* Clear the pending-request marker once the request has been processed. */
static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}
20188e236546SChristian Borntraeger 
/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	/* busy-wait until the SIE interception handler has left guest mode */
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}
202949b99e1eSChristian Borntraeger 
/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}
203649b99e1eSChristian Borntraeger 
/*
 * gmap invalidation notifier: when a guest mapping in [start, end] is
 * invalidated, request a prefix-page (MMU) reload for every VCPU whose
 * two prefix pages intersect the range.  Shadow gmaps are handled
 * elsewhere and ignored here.
 */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;
	unsigned long prefix;
	int i;

	if (gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		prefix = kvm_s390_get_prefix(vcpu);
		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
				   start, end);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}
20602c70fe44SChristian Borntraeger 
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
2067b6d33834SChristoffer Dall 
/*
 * KVM_GET_ONE_REG handler: copy the requested s390 register to user
 * space at reg->addr.  Returns 0 on success, -EFAULT from put_user on a
 * bad user address, or -EINVAL for an unknown register id.
 */
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		/* use the accessor so running-time accounting is applied */
		r = put_user(kvm_s390_get_cpu_timer(vcpu),
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
211614eebd91SCarsten Otte 
/*
 * KVM_SET_ONE_REG handler: read the new register value from user space
 * at reg->addr and install it.  Returns 0 on success, -EFAULT from
 * get_user on a bad user address, or -EINVAL for an unknown register id.
 */
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;
	__u64 val;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		/* go through the setter so timer accounting stays consistent */
		r = get_user(val, (u64 __user *)reg->addr);
		if (!r)
			kvm_s390_set_cpu_timer(vcpu, val);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		/* an invalid token disables async page faults entirely */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
2169b6d33834SChristoffer Dall 
/* ioctl wrapper around the architectural initial CPU reset; never fails. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
2175b0c632dbSHeiko Carstens 
/* Copy the general-purpose registers from user space into the run area. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}
2181b0c632dbSHeiko Carstens 
/* Copy the general-purpose registers from the run area to user space. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
2187b0c632dbSHeiko Carstens 
/*
 * Install access and control registers supplied by user space.  The
 * access registers are re-loaded into the CPU immediately since the
 * VCPU thread may already be between vcpu_load and vcpu_put.
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}
2196b0c632dbSHeiko Carstens 
/* Copy access and control registers out to user space. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
2204b0c632dbSHeiko Carstens 
/*
 * Install floating-point state from user space.  Registers are written
 * into current's lazy FPU area (converted to vector layout if the
 * machine has VX), so they are picked up on the next FPU reload.
 * Returns -EINVAL for an invalid floating-point control value.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* make sure the new values will be lazily loaded */
	save_fpu_regs();
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	current->thread.fpu.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
	else
		memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
	return 0;
}
2218b0c632dbSHeiko Carstens 
/*
 * Copy the current floating-point state to user space, converting from
 * the vector register layout when the machine has VX.
 */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* make sure we have the latest values */
	save_fpu_regs();
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
	else
		memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
	fpu->fpc = current->thread.fpu.fpc;
	return 0;
}
2230b0c632dbSHeiko Carstens 
2231b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
2232b0c632dbSHeiko Carstens {
2233b0c632dbSHeiko Carstens 	int rc = 0;
2234b0c632dbSHeiko Carstens 
22357a42fdc2SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
2236b0c632dbSHeiko Carstens 		rc = -EBUSY;
2237d7b0b5ebSCarsten Otte 	else {
2238d7b0b5ebSCarsten Otte 		vcpu->run->psw_mask = psw.mask;
2239d7b0b5ebSCarsten Otte 		vcpu->run->psw_addr = psw.addr;
2240d7b0b5ebSCarsten Otte 	}
2241b0c632dbSHeiko Carstens 	return rc;
2242b0c632dbSHeiko Carstens }
2243b0c632dbSHeiko Carstens 
/* KVM_TRANSLATE is not supported on s390. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
2249b0c632dbSHeiko Carstens 
/* debug flags user space may pass to KVM_SET_GUEST_DEBUG */
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

/*
 * KVM_SET_GUEST_DEBUG handler: enable/disable guest debugging (guest
 * PER and optional hardware breakpoints).  Requires the guest-PER SCLP
 * capability; on any failure all debug state is rolled back.  Returns 0
 * on success, -EINVAL for bad flags or missing capability, or the error
 * from importing the breakpoint data.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	/* start from a clean slate before applying the new settings */
	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;
	if (!sclp.has_gpere)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		/* roll back: leave the VCPU with debugging fully disabled */
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
2287b0c632dbSHeiko Carstens 
228862d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
228962d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
229062d9f0dbSMarcelo Tosatti {
22916352e4d2SDavid Hildenbrand 	/* CHECK_STOP and LOAD are not supported yet */
22926352e4d2SDavid Hildenbrand 	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
22936352e4d2SDavid Hildenbrand 				       KVM_MP_STATE_OPERATING;
229462d9f0dbSMarcelo Tosatti }
229562d9f0dbSMarcelo Tosatti 
/*
 * Set the vcpu's mp_state (KVM_SET_MP_STATE). Accepting this ioctl also
 * marks the VM as having user-space-controlled CPU state, which changes
 * how start/stop is handled elsewhere (e.g. in kvm_arch_vcpu_ioctl_run).
 * Returns 0 on success, -ENXIO for unsupported states.
 */
229662d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
229762d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
229862d9f0dbSMarcelo Tosatti {
22996352e4d2SDavid Hildenbrand 	int rc = 0;
23006352e4d2SDavid Hildenbrand 
23016352e4d2SDavid Hildenbrand 	/* user space knows about this interface - let it control the state */
23026352e4d2SDavid Hildenbrand 	vcpu->kvm->arch.user_cpu_state_ctrl = 1;
23036352e4d2SDavid Hildenbrand 
23046352e4d2SDavid Hildenbrand 	switch (mp_state->mp_state) {
23056352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_STOPPED:
23066352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
23076352e4d2SDavid Hildenbrand 		break;
23086352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_OPERATING:
23096352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
23106352e4d2SDavid Hildenbrand 		break;
23116352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_LOAD:
23126352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_CHECK_STOP:
23136352e4d2SDavid Hildenbrand 		/* fall through - CHECK_STOP and LOAD are not supported yet */
23146352e4d2SDavid Hildenbrand 	default:
23156352e4d2SDavid Hildenbrand 		rc = -ENXIO;
23166352e4d2SDavid Hildenbrand 	}
23176352e4d2SDavid Hildenbrand 
23186352e4d2SDavid Hildenbrand 	return rc;
231962d9f0dbSMarcelo Tosatti }
232062d9f0dbSMarcelo Tosatti 
/* Is the IBS (interruption-blocking-state) cpuflag currently set? */
23218ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu)
23228ad35755SDavid Hildenbrand {
23238ad35755SDavid Hildenbrand 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
23248ad35755SDavid Hildenbrand }
23258ad35755SDavid Hildenbrand 
/*
 * Process all pending vcpu requests before (re-)entering SIE.
 * After handling a request we restart the whole loop, since handling one
 * request can race with new requests being posted. Returns 0 when all
 * requests are handled, or a negative error code from a failed handler.
 */
23262c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
23272c70fe44SChristian Borntraeger {
23288ad35755SDavid Hildenbrand retry:
23298e236546SChristian Borntraeger 	kvm_s390_vcpu_request_handled(vcpu);
2330586b7ccdSChristian Borntraeger 	if (!vcpu->requests)
2331586b7ccdSChristian Borntraeger 		return 0;
23322c70fe44SChristian Borntraeger 	/*
23332c70fe44SChristian Borntraeger 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
2334b2d73b2aSMartin Schwidefsky 	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
23352c70fe44SChristian Borntraeger 	 * This ensures that the ipte instruction for this request has
23362c70fe44SChristian Borntraeger 	 * already finished. We might race against a second unmapper that
23372c70fe44SChristian Borntraeger 	 * wants to set the blocking bit. Lets just retry the request loop.
23382c70fe44SChristian Borntraeger 	 */
23398ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
23402c70fe44SChristian Borntraeger 		int rc;
2341b2d73b2aSMartin Schwidefsky 		rc = gmap_mprotect_notify(vcpu->arch.gmap,
2342fda902cbSMichael Mueller 					  kvm_s390_get_prefix(vcpu),
2343b2d73b2aSMartin Schwidefsky 					  PAGE_SIZE * 2, PROT_WRITE);
23442c70fe44SChristian Borntraeger 		if (rc)
23452c70fe44SChristian Borntraeger 			return rc;
23468ad35755SDavid Hildenbrand 		goto retry;
23472c70fe44SChristian Borntraeger 	}
23488ad35755SDavid Hildenbrand 
	/* Force the SIE block to re-fetch guest translations. */
2349d3d692c8SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2350d3d692c8SDavid Hildenbrand 		vcpu->arch.sie_block->ihcpu = 0xffff;
2351d3d692c8SDavid Hildenbrand 		goto retry;
2352d3d692c8SDavid Hildenbrand 	}
2353d3d692c8SDavid Hildenbrand 
23548ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
23558ad35755SDavid Hildenbrand 		if (!ibs_enabled(vcpu)) {
23568ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
2357805de8f4SPeter Zijlstra 			atomic_or(CPUSTAT_IBS,
23588ad35755SDavid Hildenbrand 					&vcpu->arch.sie_block->cpuflags);
23598ad35755SDavid Hildenbrand 		}
23608ad35755SDavid Hildenbrand 		goto retry;
23618ad35755SDavid Hildenbrand 	}
23628ad35755SDavid Hildenbrand 
23638ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
23648ad35755SDavid Hildenbrand 		if (ibs_enabled(vcpu)) {
23658ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
2366805de8f4SPeter Zijlstra 			atomic_andnot(CPUSTAT_IBS,
23678ad35755SDavid Hildenbrand 					  &vcpu->arch.sie_block->cpuflags);
23688ad35755SDavid Hildenbrand 		}
23698ad35755SDavid Hildenbrand 		goto retry;
23708ad35755SDavid Hildenbrand 	}
23718ad35755SDavid Hildenbrand 
23720759d068SDavid Hildenbrand 	/* nothing to do, just clear the request */
23730759d068SDavid Hildenbrand 	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
23740759d068SDavid Hildenbrand 
23752c70fe44SChristian Borntraeger 	return 0;
23762c70fe44SChristian Borntraeger }
23772c70fe44SChristian Borntraeger 
/*
 * Set the guest TOD clock for the whole VM. The epoch (guest TOD minus
 * host TOD) is computed once and propagated to every vcpu's SIE block
 * while all vcpus are blocked, so they all observe the same epoch.
 */
237825ed1675SDavid Hildenbrand void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
237925ed1675SDavid Hildenbrand {
238025ed1675SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
238125ed1675SDavid Hildenbrand 	int i;
238225ed1675SDavid Hildenbrand 
238325ed1675SDavid Hildenbrand 	mutex_lock(&kvm->lock);
238425ed1675SDavid Hildenbrand 	preempt_disable();
238525ed1675SDavid Hildenbrand 	kvm->arch.epoch = tod - get_tod_clock();
238625ed1675SDavid Hildenbrand 	kvm_s390_vcpu_block_all(kvm);
238725ed1675SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm)
238825ed1675SDavid Hildenbrand 		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
238925ed1675SDavid Hildenbrand 	kvm_s390_vcpu_unblock_all(kvm);
239025ed1675SDavid Hildenbrand 	preempt_enable();
239125ed1675SDavid Hildenbrand 	mutex_unlock(&kvm->lock);
239225ed1675SDavid Hildenbrand }
239325ed1675SDavid Hildenbrand 
2394fa576c58SThomas Huth /**
2395fa576c58SThomas Huth  * kvm_arch_fault_in_page - fault-in guest page if necessary
2396fa576c58SThomas Huth  * @vcpu: The corresponding virtual cpu
2397fa576c58SThomas Huth  * @gpa: Guest physical address
2398fa576c58SThomas Huth  * @writable: Whether the page should be writable or not
2399fa576c58SThomas Huth  *
2400fa576c58SThomas Huth  * Make sure that a guest page has been faulted-in on the host.
2401fa576c58SThomas Huth  *
2402fa576c58SThomas Huth  * Return: Zero on success, negative error code otherwise.
2403fa576c58SThomas Huth  */
2404fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
240524eb3a82SDominik Dingel {
	/* Delegate to the guest mapping; request write access only if asked. */
2406527e30b4SMartin Schwidefsky 	return gmap_fault(vcpu->arch.gmap, gpa,
2407527e30b4SMartin Schwidefsky 			  writable ? FAULT_FLAG_WRITE : 0);
240824eb3a82SDominik Dingel }
240924eb3a82SDominik Dingel 
/*
 * Inject a pfault notification carrying @token into the guest:
 * PFAULT_INIT is delivered as a per-vcpu irq, PFAULT_DONE as a
 * VM-wide interrupt. Injection failures are reported via WARN_ON_ONCE.
 */
24103c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
24113c038e6bSDominik Dingel 				      unsigned long token)
24123c038e6bSDominik Dingel {
24133c038e6bSDominik Dingel 	struct kvm_s390_interrupt inti;
2414383d0b05SJens Freimann 	struct kvm_s390_irq irq;
24153c038e6bSDominik Dingel 
24163c038e6bSDominik Dingel 	if (start_token) {
2417383d0b05SJens Freimann 		irq.u.ext.ext_params2 = token;
2418383d0b05SJens Freimann 		irq.type = KVM_S390_INT_PFAULT_INIT;
2419383d0b05SJens Freimann 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
24203c038e6bSDominik Dingel 	} else {
24213c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_DONE;
2422383d0b05SJens Freimann 		inti.parm64 = token;
24233c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
24243c038e6bSDominik Dingel 	}
24253c038e6bSDominik Dingel }
24263c038e6bSDominik Dingel 
/* async_pf callback: page is not present yet - inject PFAULT_INIT. */
24273c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
24283c038e6bSDominik Dingel 				     struct kvm_async_pf *work)
24293c038e6bSDominik Dingel {
24303c038e6bSDominik Dingel 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
24313c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
24323c038e6bSDominik Dingel }
24333c038e6bSDominik Dingel 
/* async_pf callback: page has become present - inject PFAULT_DONE. */
24343c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
24353c038e6bSDominik Dingel 				 struct kvm_async_pf *work)
24363c038e6bSDominik Dingel {
24373c038e6bSDominik Dingel 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
24383c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
24393c038e6bSDominik Dingel }
24403c038e6bSDominik Dingel 
/* async_pf callback: nothing to do here on s390 (see comment below). */
24413c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
24423c038e6bSDominik Dingel 			       struct kvm_async_pf *work)
24433c038e6bSDominik Dingel {
24443c038e6bSDominik Dingel 	/* s390 will always inject the page directly */
24453c038e6bSDominik Dingel }
24463c038e6bSDominik Dingel 
/* Always allow "page present" processing so async_pf cleanup runs. */
24473c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
24483c038e6bSDominik Dingel {
24493c038e6bSDominik Dingel 	/*
24503c038e6bSDominik Dingel 	 * s390 will always inject the page directly,
24513c038e6bSDominik Dingel 	 * but we still want check_async_completion to cleanup
24523c038e6bSDominik Dingel 	 */
24533c038e6bSDominik Dingel 	return true;
24543c038e6bSDominik Dingel }
24553c038e6bSDominik Dingel 
/*
 * Try to arm an async page fault for the address that just faulted
 * (current->thread.gmap_addr). Bails out with 0 (no async pf set up)
 * whenever the guest's pfault preconditions are not met: invalid token,
 * PSW mask mismatch, external interrupts disabled, pending irqs,
 * CR0 bit 9 clear, or pfault disabled on the gmap.
 */
24563c038e6bSDominik Dingel static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
24573c038e6bSDominik Dingel {
24583c038e6bSDominik Dingel 	hva_t hva;
24593c038e6bSDominik Dingel 	struct kvm_arch_async_pf arch;
24603c038e6bSDominik Dingel 	int rc;
24613c038e6bSDominik Dingel 
24623c038e6bSDominik Dingel 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
24633c038e6bSDominik Dingel 		return 0;
24643c038e6bSDominik Dingel 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
24653c038e6bSDominik Dingel 	    vcpu->arch.pfault_compare)
24663c038e6bSDominik Dingel 		return 0;
24673c038e6bSDominik Dingel 	if (psw_extint_disabled(vcpu))
24683c038e6bSDominik Dingel 		return 0;
24699a022067SDavid Hildenbrand 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
24703c038e6bSDominik Dingel 		return 0;
24713c038e6bSDominik Dingel 	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
24723c038e6bSDominik Dingel 		return 0;
24733c038e6bSDominik Dingel 	if (!vcpu->arch.gmap->pfault_enabled)
24743c038e6bSDominik Dingel 		return 0;
24753c038e6bSDominik Dingel 
	/* Translate the faulting guest address to a host virtual address. */
247681480cc1SHeiko Carstens 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
247781480cc1SHeiko Carstens 	hva += current->thread.gmap_addr & ~PAGE_MASK;
247881480cc1SHeiko Carstens 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
24793c038e6bSDominik Dingel 		return 0;
24803c038e6bSDominik Dingel 
24813c038e6bSDominik Dingel 	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
24823c038e6bSDominik Dingel 	return rc;
24833c038e6bSDominik Dingel }
24843c038e6bSDominik Dingel 
/*
 * Per-iteration preparation before entering SIE: handle completed
 * pfaults, sync gprs 14/15 into the SIE block, yield/handle machine
 * checks, deliver pending interrupts, process vcpu requests, and patch
 * in guest-debug PER state if enabled. Returns 0 to proceed into SIE,
 * or a non-zero rc that aborts the run loop.
 */
24853fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu)
2486b0c632dbSHeiko Carstens {
24873fb4c40fSThomas Huth 	int rc, cpuflags;
2488e168bf8dSCarsten Otte 
24893c038e6bSDominik Dingel 	/*
24903c038e6bSDominik Dingel 	 * On s390 notifications for arriving pages will be delivered directly
24913c038e6bSDominik Dingel 	 * to the guest but the house keeping for completed pfaults is
24923c038e6bSDominik Dingel 	 * handled outside the worker.
24933c038e6bSDominik Dingel 	 */
24943c038e6bSDominik Dingel 	kvm_check_async_pf_completion(vcpu);
24953c038e6bSDominik Dingel 
24967ec7c8c7SChristian Borntraeger 	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
24977ec7c8c7SChristian Borntraeger 	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
2498b0c632dbSHeiko Carstens 
2499b0c632dbSHeiko Carstens 	if (need_resched())
2500b0c632dbSHeiko Carstens 		schedule();
2501b0c632dbSHeiko Carstens 
2502d3a73acbSMartin Schwidefsky 	if (test_cpu_flag(CIF_MCCK_PENDING))
250371cde587SChristian Borntraeger 		s390_handle_mcck();
250471cde587SChristian Borntraeger 
	/* ucontrol VMs manage interrupt delivery in user space. */
250579395031SJens Freimann 	if (!kvm_is_ucontrol(vcpu->kvm)) {
250679395031SJens Freimann 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
250779395031SJens Freimann 		if (rc)
250879395031SJens Freimann 			return rc;
250979395031SJens Freimann 	}
25100ff31867SCarsten Otte 
25112c70fe44SChristian Borntraeger 	rc = kvm_s390_handle_requests(vcpu);
25122c70fe44SChristian Borntraeger 	if (rc)
25132c70fe44SChristian Borntraeger 		return rc;
25142c70fe44SChristian Borntraeger 
251527291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu)) {
251627291e21SDavid Hildenbrand 		kvm_s390_backup_guest_per_regs(vcpu);
251727291e21SDavid Hildenbrand 		kvm_s390_patch_guest_per_regs(vcpu);
251827291e21SDavid Hildenbrand 	}
251927291e21SDavid Hildenbrand 
2520b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icptcode = 0;
25213fb4c40fSThomas Huth 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
25223fb4c40fSThomas Huth 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
25233fb4c40fSThomas Huth 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
25242b29a9fdSDominik Dingel 
25253fb4c40fSThomas Huth 	return 0;
25263fb4c40fSThomas Huth }
25273fb4c40fSThomas Huth 
/*
 * SIE itself faulted while accessing guest memory: forward the PSW past
 * the faulting instruction and inject an addressing exception (or the
 * stored program interrupt on instruction-fetch failures).
 */
2528492d8642SThomas Huth static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
2529492d8642SThomas Huth {
253056317920SDavid Hildenbrand 	struct kvm_s390_pgm_info pgm_info = {
253156317920SDavid Hildenbrand 		.code = PGM_ADDRESSING,
253256317920SDavid Hildenbrand 	};
253356317920SDavid Hildenbrand 	u8 opcode, ilen;
2534492d8642SThomas Huth 	int rc;
2535492d8642SThomas Huth 
2536492d8642SThomas Huth 	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
2537492d8642SThomas Huth 	trace_kvm_s390_sie_fault(vcpu);
2538492d8642SThomas Huth 
2539492d8642SThomas Huth 	/*
2540492d8642SThomas Huth 	 * We want to inject an addressing exception, which is defined as a
2541492d8642SThomas Huth 	 * suppressing or terminating exception. However, since we came here
2542492d8642SThomas Huth 	 * by a DAT access exception, the PSW still points to the faulting
2543492d8642SThomas Huth 	 * instruction since DAT exceptions are nullifying. So we've got
2544492d8642SThomas Huth 	 * to look up the current opcode to get the length of the instruction
2545492d8642SThomas Huth 	 * to be able to forward the PSW.
2546492d8642SThomas Huth 	 */
254765977322SDavid Hildenbrand 	rc = read_guest_instr(vcpu, &opcode, 1);
254856317920SDavid Hildenbrand 	ilen = insn_length(opcode);
25499b0d721aSDavid Hildenbrand 	if (rc < 0) {
25509b0d721aSDavid Hildenbrand 		return rc;
25519b0d721aSDavid Hildenbrand 	} else if (rc) {
25529b0d721aSDavid Hildenbrand 		/* Instruction-Fetching Exceptions - we can't detect the ilen.
25539b0d721aSDavid Hildenbrand 		 * Forward by arbitrary ilc, injection will take care of
25549b0d721aSDavid Hildenbrand 		 * nullification if necessary.
25559b0d721aSDavid Hildenbrand 		 */
25569b0d721aSDavid Hildenbrand 		pgm_info = vcpu->arch.pgm;
25579b0d721aSDavid Hildenbrand 		ilen = 4;
25589b0d721aSDavid Hildenbrand 	}
255956317920SDavid Hildenbrand 	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
256056317920SDavid Hildenbrand 	kvm_s390_forward_psw(vcpu, ilen);
256156317920SDavid Hildenbrand 	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
2562492d8642SThomas Huth }
2563492d8642SThomas Huth 
/*
 * Post-SIE processing: restore debug/gpr state and dispatch on why SIE
 * exited - an interception code (handled in kernel or forwarded to user
 * space as -EREMOTE), a non-fault exit, a ucontrol fault, a gmap pfault
 * (async pf or synchronous fault-in), or a fault inside SIE itself.
 */
25643fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
25653fb4c40fSThomas Huth {
25662b29a9fdSDominik Dingel 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
25672b29a9fdSDominik Dingel 		   vcpu->arch.sie_block->icptcode);
25682b29a9fdSDominik Dingel 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
25692b29a9fdSDominik Dingel 
257027291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu))
257127291e21SDavid Hildenbrand 		kvm_s390_restore_guest_per_regs(vcpu);
257227291e21SDavid Hildenbrand 
25737ec7c8c7SChristian Borntraeger 	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
25747ec7c8c7SChristian Borntraeger 	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
257571f116bfSDavid Hildenbrand 
257671f116bfSDavid Hildenbrand 	if (vcpu->arch.sie_block->icptcode > 0) {
257771f116bfSDavid Hildenbrand 		int rc = kvm_handle_sie_intercept(vcpu);
257871f116bfSDavid Hildenbrand 
257971f116bfSDavid Hildenbrand 		if (rc != -EOPNOTSUPP)
258071f116bfSDavid Hildenbrand 			return rc;
		/* Unhandled intercept: let user space deal with it. */
258171f116bfSDavid Hildenbrand 		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
258271f116bfSDavid Hildenbrand 		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
258371f116bfSDavid Hildenbrand 		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
258471f116bfSDavid Hildenbrand 		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
258571f116bfSDavid Hildenbrand 		return -EREMOTE;
258671f116bfSDavid Hildenbrand 	} else if (exit_reason != -EFAULT) {
258771f116bfSDavid Hildenbrand 		vcpu->stat.exit_null++;
258871f116bfSDavid Hildenbrand 		return 0;
2589210b1607SThomas Huth 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
2590210b1607SThomas Huth 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2591210b1607SThomas Huth 		vcpu->run->s390_ucontrol.trans_exc_code =
2592210b1607SThomas Huth 						current->thread.gmap_addr;
2593210b1607SThomas Huth 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
259471f116bfSDavid Hildenbrand 		return -EREMOTE;
259524eb3a82SDominik Dingel 	} else if (current->thread.gmap_pfault) {
25963c038e6bSDominik Dingel 		trace_kvm_s390_major_guest_pfault(vcpu);
259724eb3a82SDominik Dingel 		current->thread.gmap_pfault = 0;
259871f116bfSDavid Hildenbrand 		if (kvm_arch_setup_async_pf(vcpu))
259971f116bfSDavid Hildenbrand 			return 0;
260071f116bfSDavid Hildenbrand 		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
2601fa576c58SThomas Huth 	}
260271f116bfSDavid Hildenbrand 	return vcpu_post_run_fault_in_sie(vcpu);
26033fb4c40fSThomas Huth }
26043fb4c40fSThomas Huth 
/*
 * Main vcpu run loop: repeatedly prepare, enter SIE (dropping srcu and
 * with irqs disabled around guest entry/exit accounting), and post-process
 * until a signal, a guest-debug exit, or a non-zero rc stops the loop.
 */
26053fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu)
26063fb4c40fSThomas Huth {
26073fb4c40fSThomas Huth 	int rc, exit_reason;
26083fb4c40fSThomas Huth 
2609800c1065SThomas Huth 	/*
2610800c1065SThomas Huth 	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
2611800c1065SThomas Huth 	 * ning the guest), so that memslots (and other stuff) are protected
2612800c1065SThomas Huth 	 */
2613800c1065SThomas Huth 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2614800c1065SThomas Huth 
2615a76ccff6SThomas Huth 	do {
26163fb4c40fSThomas Huth 		rc = vcpu_pre_run(vcpu);
26173fb4c40fSThomas Huth 		if (rc)
2618a76ccff6SThomas Huth 			break;
26193fb4c40fSThomas Huth 
2620800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
26213fb4c40fSThomas Huth 		/*
2622a76ccff6SThomas Huth 		 * As PF_VCPU will be used in fault handler, between
2623a76ccff6SThomas Huth 		 * guest_enter and guest_exit should be no uaccess.
26243fb4c40fSThomas Huth 		 */
26250097d12eSChristian Borntraeger 		local_irq_disable();
2626*6edaa530SPaolo Bonzini 		guest_enter_irqoff();
2627db0758b2SDavid Hildenbrand 		__disable_cpu_timer_accounting(vcpu);
26280097d12eSChristian Borntraeger 		local_irq_enable();
2629a76ccff6SThomas Huth 		exit_reason = sie64a(vcpu->arch.sie_block,
2630a76ccff6SThomas Huth 				     vcpu->run->s.regs.gprs);
26310097d12eSChristian Borntraeger 		local_irq_disable();
2632db0758b2SDavid Hildenbrand 		__enable_cpu_timer_accounting(vcpu);
2633*6edaa530SPaolo Bonzini 		guest_exit_irqoff();
26340097d12eSChristian Borntraeger 		local_irq_enable();
2635800c1065SThomas Huth 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
26363fb4c40fSThomas Huth 
26373fb4c40fSThomas Huth 		rc = vcpu_post_run(vcpu, exit_reason);
263827291e21SDavid Hildenbrand 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
26393fb4c40fSThomas Huth 
2640800c1065SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
2641e168bf8dSCarsten Otte 	return rc;
2642b0c632dbSHeiko Carstens }
2643b0c632dbSHeiko Carstens 
/*
 * Copy user-space-dirtied register state from kvm_run into the vcpu /
 * SIE block, honoring the kvm_dirty_regs bits, then clear those bits.
 */
2644b028ee3eSDavid Hildenbrand static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2645b028ee3eSDavid Hildenbrand {
2646b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2647b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2648b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2649b028ee3eSDavid Hildenbrand 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2650b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2651b028ee3eSDavid Hildenbrand 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
2652d3d692c8SDavid Hildenbrand 		/* some control register changes require a tlb flush */
2653d3d692c8SDavid Hildenbrand 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2654b028ee3eSDavid Hildenbrand 	}
2655b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
26564287f247SDavid Hildenbrand 		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
2657b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2658b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2659b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2660b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2661b028ee3eSDavid Hildenbrand 	}
2662b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2663b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2664b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2665b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		/* An invalid token cancels any outstanding async pfaults. */
26669fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
26679fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
2668b028ee3eSDavid Hildenbrand 	}
2669b028ee3eSDavid Hildenbrand 	kvm_run->kvm_dirty_regs = 0;
2670b028ee3eSDavid Hildenbrand }
2671b028ee3eSDavid Hildenbrand 
/* Mirror of sync_regs: copy vcpu / SIE block state back into kvm_run. */
2672b028ee3eSDavid Hildenbrand static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2673b028ee3eSDavid Hildenbrand {
2674b028ee3eSDavid Hildenbrand 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2675b028ee3eSDavid Hildenbrand 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2676b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2677b028ee3eSDavid Hildenbrand 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
26784287f247SDavid Hildenbrand 	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
2679b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2680b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2681b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2682b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2683b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2684b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2685b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2686b028ee3eSDavid Hildenbrand }
2687b028ee3eSDavid Hildenbrand 
/*
 * KVM_RUN ioctl entry point: sync registers from user space, run the
 * vcpu loop, translate the loop's rc into a kvm_run exit reason (signal
 * -> -EINTR, guest-debug exit, -EREMOTE -> 0 with prepared kvm_run),
 * and store registers back. Respects the vcpu's signal mask while
 * running.
 */
2688b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2689b0c632dbSHeiko Carstens {
26908f2abe6aSChristian Borntraeger 	int rc;
2691b0c632dbSHeiko Carstens 	sigset_t sigsaved;
2692b0c632dbSHeiko Carstens 
269327291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu)) {
269427291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
269527291e21SDavid Hildenbrand 		return 0;
269627291e21SDavid Hildenbrand 	}
269727291e21SDavid Hildenbrand 
2698b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
2699b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2700b0c632dbSHeiko Carstens 
	/* Auto-start the vcpu unless user space controls the cpu state. */
27016352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
27026852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
27036352e4d2SDavid Hildenbrand 	} else if (is_vcpu_stopped(vcpu)) {
2704ea2cdd27SDavid Hildenbrand 		pr_err_ratelimited("can't run stopped vcpu %d\n",
27056352e4d2SDavid Hildenbrand 				   vcpu->vcpu_id);
27066352e4d2SDavid Hildenbrand 		return -EINVAL;
27076352e4d2SDavid Hildenbrand 	}
2708b0c632dbSHeiko Carstens 
2709b028ee3eSDavid Hildenbrand 	sync_regs(vcpu, kvm_run);
2710db0758b2SDavid Hildenbrand 	enable_cpu_timer_accounting(vcpu);
2711d7b0b5ebSCarsten Otte 
2712dab4079dSHeiko Carstens 	might_fault();
2713e168bf8dSCarsten Otte 	rc = __vcpu_run(vcpu);
27149ace903dSChristian Ehrhardt 
2715b1d16c49SChristian Ehrhardt 	if (signal_pending(current) && !rc) {
2716b1d16c49SChristian Ehrhardt 		kvm_run->exit_reason = KVM_EXIT_INTR;
27178f2abe6aSChristian Borntraeger 		rc = -EINTR;
2718b1d16c49SChristian Ehrhardt 	}
27198f2abe6aSChristian Borntraeger 
272027291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu) && !rc)  {
272127291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
272227291e21SDavid Hildenbrand 		rc = 0;
272327291e21SDavid Hildenbrand 	}
272427291e21SDavid Hildenbrand 
27258f2abe6aSChristian Borntraeger 	if (rc == -EREMOTE) {
272671f116bfSDavid Hildenbrand 		/* userspace support is needed, kvm_run has been prepared */
27278f2abe6aSChristian Borntraeger 		rc = 0;
27288f2abe6aSChristian Borntraeger 	}
27298f2abe6aSChristian Borntraeger 
2730db0758b2SDavid Hildenbrand 	disable_cpu_timer_accounting(vcpu);
2731b028ee3eSDavid Hildenbrand 	store_regs(vcpu, kvm_run);
2732d7b0b5ebSCarsten Otte 
2733b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
2734b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2735b0c632dbSHeiko Carstens 
2736b0c632dbSHeiko Carstens 	vcpu->stat.exit_userspace++;
27377e8e6ab4SHeiko Carstens 	return rc;
2738b0c632dbSHeiko Carstens }
2739b0c632dbSHeiko Carstens 
2740b0c632dbSHeiko Carstens /*
2741b0c632dbSHeiko Carstens  * store status at address
2742b0c632dbSHeiko Carstens  * we have two special cases:
2743b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2744b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2745b0c632dbSHeiko Carstens  */
/*
 * Write the vcpu's architected status (fprs/vrs, gprs, PSW, prefix,
 * fpc, TOD programmable reg, cpu timer, clock comparator, access and
 * control regs) into guest absolute memory at @gpa. The special gpa
 * values NOADDR and PREFIXED are resolved first (see comment above).
 * Individual write errors are OR-ed together; returns 0 or -EFAULT.
 */
2746d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
2747b0c632dbSHeiko Carstens {
2748092670cdSCarsten Otte 	unsigned char archmode = 1;
27499abc2a08SDavid Hildenbrand 	freg_t fprs[NUM_FPRS];
2750fda902cbSMichael Mueller 	unsigned int px;
27514287f247SDavid Hildenbrand 	u64 clkcomp, cputm;
2752d0bce605SHeiko Carstens 	int rc;
2753b0c632dbSHeiko Carstens 
2754d9a3a09aSMartin Schwidefsky 	px = kvm_s390_get_prefix(vcpu);
2755d0bce605SHeiko Carstens 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2756d0bce605SHeiko Carstens 		if (write_guest_abs(vcpu, 163, &archmode, 1))
2757b0c632dbSHeiko Carstens 			return -EFAULT;
2758d9a3a09aSMartin Schwidefsky 		gpa = 0;
2759d0bce605SHeiko Carstens 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2760d0bce605SHeiko Carstens 		if (write_guest_real(vcpu, 163, &archmode, 1))
2761b0c632dbSHeiko Carstens 			return -EFAULT;
2762d9a3a09aSMartin Schwidefsky 		gpa = px;
2763d9a3a09aSMartin Schwidefsky 	} else
2764d9a3a09aSMartin Schwidefsky 		gpa -= __LC_FPREGS_SAVE_AREA;
27659abc2a08SDavid Hildenbrand 
27669abc2a08SDavid Hildenbrand 	/* manually convert vector registers if necessary */
27679abc2a08SDavid Hildenbrand 	if (MACHINE_HAS_VX) {
27689522b37fSDavid Hildenbrand 		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
2769d9a3a09aSMartin Schwidefsky 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
27709abc2a08SDavid Hildenbrand 				     fprs, 128);
27719abc2a08SDavid Hildenbrand 	} else {
27729abc2a08SDavid Hildenbrand 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
27736fd8e67dSDavid Hildenbrand 				     vcpu->run->s.regs.fprs, 128);
27749abc2a08SDavid Hildenbrand 	}
2775d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
2776d0bce605SHeiko Carstens 			      vcpu->run->s.regs.gprs, 128);
2777d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
2778d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gpsw, 16);
2779d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
2780fda902cbSMichael Mueller 			      &px, 4);
2781d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
27829abc2a08SDavid Hildenbrand 			      &vcpu->run->s.regs.fpc, 4);
2783d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
2784d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->todpr, 4);
27854287f247SDavid Hildenbrand 	cputm = kvm_s390_get_cpu_timer(vcpu);
2786d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
27874287f247SDavid Hildenbrand 			      &cputm, 8);
2788178bd789SThomas Huth 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
2789d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
2790d0bce605SHeiko Carstens 			      &clkcomp, 8);
2791d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
2792d0bce605SHeiko Carstens 			      &vcpu->run->s.regs.acrs, 64);
2793d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
2794d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gcr, 128);
2795d0bce605SHeiko Carstens 	return rc ? -EFAULT : 0;
2796b0c632dbSHeiko Carstens }
2797b0c632dbSHeiko Carstens 
/* Refresh fpc/acrs from the running host context, then store status. */
2798e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2799e879892cSThomas Huth {
2800e879892cSThomas Huth 	/*
2801e879892cSThomas Huth 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2802e879892cSThomas Huth 	 * copying in vcpu load/put. Lets update our copies before we save
2803e879892cSThomas Huth 	 * it into the save area
2804e879892cSThomas Huth 	 */
2805d0164ee2SHendrik Brueckner 	save_fpu_regs();
28069abc2a08SDavid Hildenbrand 	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
2807e879892cSThomas Huth 	save_access_regs(vcpu->run->s.regs.acrs);
2808e879892cSThomas Huth 
2809e879892cSThomas Huth 	return kvm_s390_store_status_unloaded(vcpu, addr);
2810e879892cSThomas Huth }
2811e879892cSThomas Huth 
2812bc17de7cSEric Farman /*
2813bc17de7cSEric Farman  * store additional status at address
2814bc17de7cSEric Farman  */
2815bc17de7cSEric Farman int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2816bc17de7cSEric Farman 					unsigned long gpa)
2817bc17de7cSEric Farman {
2818bc17de7cSEric Farman 	/* Only bits 0-53 are used for address formation */
2819bc17de7cSEric Farman 	if (!(gpa & ~0x3ff))
2820bc17de7cSEric Farman 		return 0;
2821bc17de7cSEric Farman 
	/* Store all 32 vector registers (512 bytes) at the 1K-aligned gpa. */
2822bc17de7cSEric Farman 	return write_guest_abs(vcpu, gpa & ~0x3ff,
2823bc17de7cSEric Farman 			       (void *)&vcpu->run->s.regs.vrs, 512);
2824bc17de7cSEric Farman }
2825bc17de7cSEric Farman 
/* Save host vector state, then store additional (VX) status; no-op
 * unless the VM has the vector facility (stfle bit 129). */
2826bc17de7cSEric Farman int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2827bc17de7cSEric Farman {
2828bc17de7cSEric Farman 	if (!test_kvm_facility(vcpu->kvm, 129))
2829bc17de7cSEric Farman 		return 0;
2830bc17de7cSEric Farman 
2831bc17de7cSEric Farman 	/*
2832bc17de7cSEric Farman 	 * The guest VXRS are in the host VXRs due to the lazy
28339977e886SHendrik Brueckner 	 * copying in vcpu load/put. We can simply call save_fpu_regs()
28349977e886SHendrik Brueckner 	 * to save the current register state because we are in the
28359977e886SHendrik Brueckner 	 * middle of a load/put cycle.
28369977e886SHendrik Brueckner 	 *
28379977e886SHendrik Brueckner 	 * Let's update our copies before we save it into the save area.
2838bc17de7cSEric Farman 	 */
2839d0164ee2SHendrik Brueckner 	save_fpu_regs();
2840bc17de7cSEric Farman 
2841bc17de7cSEric Farman 	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2842bc17de7cSEric Farman }
2843bc17de7cSEric Farman 
/* Cancel a pending ENABLE_IBS request and ask the vcpu to disable IBS. */
28448ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
28458ad35755SDavid Hildenbrand {
28468ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
28478e236546SChristian Borntraeger 	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
28488ad35755SDavid Hildenbrand }
28498ad35755SDavid Hildenbrand 
/* Disable IBS on every vcpu of the VM. */
28508ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
28518ad35755SDavid Hildenbrand {
28528ad35755SDavid Hildenbrand 	unsigned int i;
28538ad35755SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
28548ad35755SDavid Hildenbrand 
28558ad35755SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
28568ad35755SDavid Hildenbrand 		__disable_ibs_on_vcpu(vcpu);
28578ad35755SDavid Hildenbrand 	}
28588ad35755SDavid Hildenbrand }
28598ad35755SDavid Hildenbrand 
/*
 * Request that IBS be enabled on @vcpu, provided the machine offers the
 * facility (sclp.has_ibs). A still-pending DISABLE request is consumed
 * first so the two requests cannot race each other.
 */
static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	if (!sclp.has_ibs)
		return;
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}
28678ad35755SDavid Hildenbrand 
28686852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
28696852d7b6SDavid Hildenbrand {
28708ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
28718ad35755SDavid Hildenbrand 
28728ad35755SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
28738ad35755SDavid Hildenbrand 		return;
28748ad35755SDavid Hildenbrand 
28756852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
28768ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
2877433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
28788ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
28798ad35755SDavid Hildenbrand 
28808ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
28818ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
28828ad35755SDavid Hildenbrand 			started_vcpus++;
28838ad35755SDavid Hildenbrand 	}
28848ad35755SDavid Hildenbrand 
28858ad35755SDavid Hildenbrand 	if (started_vcpus == 0) {
28868ad35755SDavid Hildenbrand 		/* we're the only active VCPU -> speed it up */
28878ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(vcpu);
28888ad35755SDavid Hildenbrand 	} else if (started_vcpus == 1) {
28898ad35755SDavid Hildenbrand 		/*
28908ad35755SDavid Hildenbrand 		 * As we are starting a second VCPU, we have to disable
28918ad35755SDavid Hildenbrand 		 * the IBS facility on all VCPUs to remove potentially
28928ad35755SDavid Hildenbrand 		 * oustanding ENABLE requests.
28938ad35755SDavid Hildenbrand 		 */
28948ad35755SDavid Hildenbrand 		__disable_ibs_on_all_vcpus(vcpu->kvm);
28958ad35755SDavid Hildenbrand 	}
28968ad35755SDavid Hildenbrand 
2897805de8f4SPeter Zijlstra 	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
28988ad35755SDavid Hildenbrand 	/*
28998ad35755SDavid Hildenbrand 	 * Another VCPU might have used IBS while we were offline.
29008ad35755SDavid Hildenbrand 	 * Let's play safe and flush the VCPU at startup.
29018ad35755SDavid Hildenbrand 	 */
2902d3d692c8SDavid Hildenbrand 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2903433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
29048ad35755SDavid Hildenbrand 	return;
29056852d7b6SDavid Hildenbrand }
29066852d7b6SDavid Hildenbrand 
29076852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
29086852d7b6SDavid Hildenbrand {
29098ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
29108ad35755SDavid Hildenbrand 	struct kvm_vcpu *started_vcpu = NULL;
29118ad35755SDavid Hildenbrand 
29128ad35755SDavid Hildenbrand 	if (is_vcpu_stopped(vcpu))
29138ad35755SDavid Hildenbrand 		return;
29148ad35755SDavid Hildenbrand 
29156852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
29168ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
2917433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
29188ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
29198ad35755SDavid Hildenbrand 
292032f5ff63SDavid Hildenbrand 	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
29216cddd432SDavid Hildenbrand 	kvm_s390_clear_stop_irq(vcpu);
292232f5ff63SDavid Hildenbrand 
2923805de8f4SPeter Zijlstra 	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
29248ad35755SDavid Hildenbrand 	__disable_ibs_on_vcpu(vcpu);
29258ad35755SDavid Hildenbrand 
29268ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
29278ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
29288ad35755SDavid Hildenbrand 			started_vcpus++;
29298ad35755SDavid Hildenbrand 			started_vcpu = vcpu->kvm->vcpus[i];
29308ad35755SDavid Hildenbrand 		}
29318ad35755SDavid Hildenbrand 	}
29328ad35755SDavid Hildenbrand 
29338ad35755SDavid Hildenbrand 	if (started_vcpus == 1) {
29348ad35755SDavid Hildenbrand 		/*
29358ad35755SDavid Hildenbrand 		 * As we only have one VCPU left, we want to enable the
29368ad35755SDavid Hildenbrand 		 * IBS facility for that VCPU to speed it up.
29378ad35755SDavid Hildenbrand 		 */
29388ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(started_vcpu);
29398ad35755SDavid Hildenbrand 	}
29408ad35755SDavid Hildenbrand 
2941433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
29428ad35755SDavid Hildenbrand 	return;
29436852d7b6SDavid Hildenbrand }
29446852d7b6SDavid Hildenbrand 
2945d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2946d6712df9SCornelia Huck 				     struct kvm_enable_cap *cap)
2947d6712df9SCornelia Huck {
2948d6712df9SCornelia Huck 	int r;
2949d6712df9SCornelia Huck 
2950d6712df9SCornelia Huck 	if (cap->flags)
2951d6712df9SCornelia Huck 		return -EINVAL;
2952d6712df9SCornelia Huck 
2953d6712df9SCornelia Huck 	switch (cap->cap) {
2954fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
2955fa6b7fe9SCornelia Huck 		if (!vcpu->kvm->arch.css_support) {
2956fa6b7fe9SCornelia Huck 			vcpu->kvm->arch.css_support = 1;
2957c92ea7b9SChristian Borntraeger 			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
2958fa6b7fe9SCornelia Huck 			trace_kvm_s390_enable_css(vcpu->kvm);
2959fa6b7fe9SCornelia Huck 		}
2960fa6b7fe9SCornelia Huck 		r = 0;
2961fa6b7fe9SCornelia Huck 		break;
2962d6712df9SCornelia Huck 	default:
2963d6712df9SCornelia Huck 		r = -EINVAL;
2964d6712df9SCornelia Huck 		break;
2965d6712df9SCornelia Huck 	}
2966d6712df9SCornelia Huck 	return r;
2967d6712df9SCornelia Huck }
2968d6712df9SCornelia Huck 
/*
 * Handle KVM_S390_MEM_OP: read or write guest logical memory on behalf
 * of userspace, or (with F_CHECK_ONLY) only verify that the access
 * would succeed. On access errors (r > 0) with F_INJECT_EXCEPTION set,
 * the corresponding program interruption is injected into the guest.
 */
static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	/* A bounce buffer is only needed when data is actually moved. */
	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	/* Guest memory access requires holding the srcu read lock. */
	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_FETCH);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_STORE);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	/* r > 0 denotes a guest access exception; optionally inject it. */
	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}
302941408c28SThomas Huth 
/*
 * Dispatcher for the s390-specific VCPU ioctls: interrupt injection,
 * status storing, initial PSW/reset, one-reg access, ucontrol address
 * space mapping, guest memory operations and irq state save/restore.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_INTERRUPT: {
		/* legacy interface: converted to the kvm_s390_irq format */
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		/* storing touches guest memory -> srcu read lock needed */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		/* ucas mapping is only valid for user-controlled VMs */
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		/* ucas unmapping is only valid for user-controlled VMs */
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
			ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		/* the buffer must hold a whole number of irqs, at least one */
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *)  irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
3184b0c632dbSHeiko Carstens 
/*
 * Page-fault handler for the VCPU file mapping. Only user-controlled
 * (ucontrol) VMs expose a page: the SIE control block at
 * KVM_S390_SIE_PAGE_OFFSET. Everything else faults with SIGBUS.
 */
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		/* take a reference; the fault path releases it */
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
31975b1c1493SCarsten Otte 
/* s390 keeps no arch-specific per-memslot data; nothing to allocate. */
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
3203db3fe4ebSTakuya Yoshikawa 
3204b0c632dbSHeiko Carstens /* Section: memory related */
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. We can have memory slots which have to be
	   located/ended at a segment boundary (1MB). The memory in userland is
	   ok to be fragmented into various different vmas. It is okay to mmap()
	   and munmap() stuff in this slot after doing this call at any time */

	/* 0xfffff masks the low 20 bits: enforce 1MB segment alignment */
	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	/* the slot must fit below the configured guest memory limit */
	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
		return -EINVAL;

	return 0;
}
3226f7784b8eSMarcelo Tosatti 
3227f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
322809170a49SPaolo Bonzini 				const struct kvm_userspace_memory_region *mem,
32298482644aSTakuya Yoshikawa 				const struct kvm_memory_slot *old,
3230f36f3f28SPaolo Bonzini 				const struct kvm_memory_slot *new,
32318482644aSTakuya Yoshikawa 				enum kvm_mr_change change)
3232f7784b8eSMarcelo Tosatti {
3233f7850c92SCarsten Otte 	int rc;
3234f7784b8eSMarcelo Tosatti 
32352cef4debSChristian Borntraeger 	/* If the basics of the memslot do not change, we do not want
32362cef4debSChristian Borntraeger 	 * to update the gmap. Every update causes several unnecessary
32372cef4debSChristian Borntraeger 	 * segment translation exceptions. This is usually handled just
32382cef4debSChristian Borntraeger 	 * fine by the normal fault handler + gmap, but it will also
32392cef4debSChristian Borntraeger 	 * cause faults on the prefix page of running guest CPUs.
32402cef4debSChristian Borntraeger 	 */
32412cef4debSChristian Borntraeger 	if (old->userspace_addr == mem->userspace_addr &&
32422cef4debSChristian Borntraeger 	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
32432cef4debSChristian Borntraeger 	    old->npages * PAGE_SIZE == mem->memory_size)
32442cef4debSChristian Borntraeger 		return;
3245598841caSCarsten Otte 
3246598841caSCarsten Otte 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
3247598841caSCarsten Otte 		mem->guest_phys_addr, mem->memory_size);
3248598841caSCarsten Otte 	if (rc)
3249ea2cdd27SDavid Hildenbrand 		pr_warn("failed to commit memory region\n");
3250598841caSCarsten Otte 	return;
3251b0c632dbSHeiko Carstens }
3252b0c632dbSHeiko Carstens 
/*
 * Build a mask for the i-th 64-bit facility-list word that clears the
 * facility bits reserved for hypervisor use.
 * NOTE(review): interpretation from the arithmetic only — sclp.hmfai is
 * read as packed 2-bit fields, one per word; confirm against the SCLP
 * hmfai definition.
 */
static inline unsigned long nonhyp_mask(int i)
{
	/* extract the 2-bit field for word i from the top of hmfai */
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	/* each field step shifts the usable-bit mask right by 16 bits */
	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}
325960a37709SAlexander Yarygin 
/* Called when a VCPU leaves the blocked state: clear the wakeup marker. */
void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = false;
}
32643491caf2SChristian Borntraeger 
/*
 * Module init: refuse to load when the SIE instruction (sief2) is not
 * available, restrict the offered facility list to bits usable by
 * guests, then register with the generic KVM core.
 */
static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE not available\n");
		return -ENODEV;
	}

	/* only advertise host facilities not reserved for the hypervisor */
	for (i = 0; i < 16; i++)
		kvm_s390_fac_list_mask[i] |=
			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}
3280b0c632dbSHeiko Carstens 
/* Module exit: unregister from the generic KVM core. */
static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}
3285b0c632dbSHeiko Carstens 
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
3297