xref: /openbmc/linux/arch/s390/kvm/kvm-s390.c (revision 91473b487dd58af6384c5c3db13de50defa2c106)
1b0c632dbSHeiko Carstens /*
2a53c8fabSHeiko Carstens  * hosting zSeries kernel virtual machines
3b0c632dbSHeiko Carstens  *
4628eb9b8SChristian Ehrhardt  * Copyright IBM Corp. 2008, 2009
5b0c632dbSHeiko Carstens  *
6b0c632dbSHeiko Carstens  * This program is free software; you can redistribute it and/or modify
7b0c632dbSHeiko Carstens  * it under the terms of the GNU General Public License (version 2 only)
8b0c632dbSHeiko Carstens  * as published by the Free Software Foundation.
9b0c632dbSHeiko Carstens  *
10b0c632dbSHeiko Carstens  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11b0c632dbSHeiko Carstens  *               Christian Borntraeger <borntraeger@de.ibm.com>
12b0c632dbSHeiko Carstens  *               Heiko Carstens <heiko.carstens@de.ibm.com>
13628eb9b8SChristian Ehrhardt  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
1415f36ebdSJason J. Herne  *               Jason J. Herne <jjherne@us.ibm.com>
15b0c632dbSHeiko Carstens  */
16b0c632dbSHeiko Carstens 
17b0c632dbSHeiko Carstens #include <linux/compiler.h>
18b0c632dbSHeiko Carstens #include <linux/err.h>
19b0c632dbSHeiko Carstens #include <linux/fs.h>
20ca872302SChristian Borntraeger #include <linux/hrtimer.h>
21b0c632dbSHeiko Carstens #include <linux/init.h>
22b0c632dbSHeiko Carstens #include <linux/kvm.h>
23b0c632dbSHeiko Carstens #include <linux/kvm_host.h>
24b2d73b2aSMartin Schwidefsky #include <linux/mman.h>
25b0c632dbSHeiko Carstens #include <linux/module.h>
26a374e892STony Krowiak #include <linux/random.h>
27b0c632dbSHeiko Carstens #include <linux/slab.h>
28ba5c1e9bSCarsten Otte #include <linux/timer.h>
2941408c28SThomas Huth #include <linux/vmalloc.h>
3015c9705fSDavid Hildenbrand #include <linux/bitmap.h>
31cbb870c8SHeiko Carstens #include <asm/asm-offsets.h>
32b0c632dbSHeiko Carstens #include <asm/lowcore.h>
33fdf03650SFan Zhang #include <asm/etr.h>
34b0c632dbSHeiko Carstens #include <asm/pgtable.h>
351e133ab2SMartin Schwidefsky #include <asm/gmap.h>
36f5daba1dSHeiko Carstens #include <asm/nmi.h>
37a0616cdeSDavid Howells #include <asm/switch_to.h>
386d3da241SJens Freimann #include <asm/isc.h>
391526bf9cSChristian Borntraeger #include <asm/sclp.h>
400a763c78SDavid Hildenbrand #include <asm/cpacf.h>
410a763c78SDavid Hildenbrand #include <asm/etr.h>
428f2abe6aSChristian Borntraeger #include "kvm-s390.h"
43b0c632dbSHeiko Carstens #include "gaccess.h"
44b0c632dbSHeiko Carstens 
45ea2cdd27SDavid Hildenbrand #define KMSG_COMPONENT "kvm-s390"
46ea2cdd27SDavid Hildenbrand #undef pr_fmt
47ea2cdd27SDavid Hildenbrand #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
48ea2cdd27SDavid Hildenbrand 
495786fffaSCornelia Huck #define CREATE_TRACE_POINTS
505786fffaSCornelia Huck #include "trace.h"
51ade38c31SCornelia Huck #include "trace-s390.h"
525786fffaSCornelia Huck 
5341408c28SThomas Huth #define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
54816c7667SJens Freimann #define LOCAL_IRQS 32
55816c7667SJens Freimann #define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
56816c7667SJens Freimann 			   (KVM_MAX_VCPUS + LOCAL_IRQS))
5741408c28SThomas Huth 
58b0c632dbSHeiko Carstens #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
59b0c632dbSHeiko Carstens 
60b0c632dbSHeiko Carstens struct kvm_stats_debugfs_item debugfs_entries[] = {
61b0c632dbSHeiko Carstens 	{ "userspace_handled", VCPU_STAT(exit_userspace) },
620eaeafa1SChristian Borntraeger 	{ "exit_null", VCPU_STAT(exit_null) },
638f2abe6aSChristian Borntraeger 	{ "exit_validity", VCPU_STAT(exit_validity) },
648f2abe6aSChristian Borntraeger 	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
658f2abe6aSChristian Borntraeger 	{ "exit_external_request", VCPU_STAT(exit_external_request) },
668f2abe6aSChristian Borntraeger 	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
67ba5c1e9bSCarsten Otte 	{ "exit_instruction", VCPU_STAT(exit_instruction) },
68ba5c1e9bSCarsten Otte 	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
69ba5c1e9bSCarsten Otte 	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
70a011eeb2SJanosch Frank 	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
71f7819512SPaolo Bonzini 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
7262bea5bfSPaolo Bonzini 	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
733491caf2SChristian Borntraeger 	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
74ce2e4f0bSDavid Hildenbrand 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
75f5e10b09SChristian Borntraeger 	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
76ba5c1e9bSCarsten Otte 	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
77aba07508SDavid Hildenbrand 	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
78aba07508SDavid Hildenbrand 	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
79ba5c1e9bSCarsten Otte 	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
807697e71fSChristian Ehrhardt 	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
81ba5c1e9bSCarsten Otte 	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
82ba5c1e9bSCarsten Otte 	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
83ba5c1e9bSCarsten Otte 	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
84ba5c1e9bSCarsten Otte 	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
85ba5c1e9bSCarsten Otte 	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
86ba5c1e9bSCarsten Otte 	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
87ba5c1e9bSCarsten Otte 	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
8869d0d3a3SChristian Borntraeger 	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
89453423dcSChristian Borntraeger 	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
90453423dcSChristian Borntraeger 	{ "instruction_spx", VCPU_STAT(instruction_spx) },
91453423dcSChristian Borntraeger 	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
92453423dcSChristian Borntraeger 	{ "instruction_stap", VCPU_STAT(instruction_stap) },
93453423dcSChristian Borntraeger 	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
948a242234SHeiko Carstens 	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
95453423dcSChristian Borntraeger 	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
96453423dcSChristian Borntraeger 	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
97b31288faSKonstantin Weitz 	{ "instruction_essa", VCPU_STAT(instruction_essa) },
98453423dcSChristian Borntraeger 	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
99453423dcSChristian Borntraeger 	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
100bb25b9baSChristian Borntraeger 	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
10195ca2cb5SJanosch Frank 	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
102a3508fbeSDavid Hildenbrand 	{ "instruction_sie", VCPU_STAT(instruction_sie) },
1035288fbf0SChristian Borntraeger 	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
104bd59d3a4SCornelia Huck 	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
1057697e71fSChristian Ehrhardt 	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
1065288fbf0SChristian Borntraeger 	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
10742cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
10842cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
1095288fbf0SChristian Borntraeger 	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
11042cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
11142cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
112cd7b4b61SEric Farman 	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
1135288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
1145288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
1155288fbf0SChristian Borntraeger 	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
11642cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
11742cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
11842cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
119388186bcSChristian Borntraeger 	{ "diagnose_10", VCPU_STAT(diagnose_10) },
120e28acfeaSChristian Borntraeger 	{ "diagnose_44", VCPU_STAT(diagnose_44) },
12141628d33SKonstantin Weitz 	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
122175a5c9eSChristian Borntraeger 	{ "diagnose_258", VCPU_STAT(diagnose_258) },
123175a5c9eSChristian Borntraeger 	{ "diagnose_308", VCPU_STAT(diagnose_308) },
124175a5c9eSChristian Borntraeger 	{ "diagnose_500", VCPU_STAT(diagnose_500) },
125b0c632dbSHeiko Carstens 	{ NULL }
126b0c632dbSHeiko Carstens };
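/*
 * Each entry above maps a debugfs file name to a counter inside
 * struct kvm_vcpu via the VCPU_STAT() offsetof() helper; the generic
 * KVM code creates these files (typically under /sys/kernel/debug/kvm)
 * so the exit, interrupt-delivery and instruction counters can be
 * inspected at runtime.
 */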
127b0c632dbSHeiko Carstens 
1289d8d5786SMichael Mueller /* upper facilities limit for kvm */
12960a37709SAlexander Yarygin unsigned long kvm_s390_fac_list_mask[16] = {
13060a37709SAlexander Yarygin 	0xffe6000000000000UL,
13160a37709SAlexander Yarygin 	0x005e000000000000UL,
1329d8d5786SMichael Mueller };
133b0c632dbSHeiko Carstens 
1349d8d5786SMichael Mueller unsigned long kvm_s390_fac_list_mask_size(void)
13578c4b59fSMichael Mueller {
1369d8d5786SMichael Mueller 	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
1379d8d5786SMichael Mueller 	return ARRAY_SIZE(kvm_s390_fac_list_mask);
13878c4b59fSMichael Mueller }
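/*
 * kvm_s390_fac_list_mask acts as an upper bound on the STFLE facility
 * bits KVM will ever report to a guest: when a VM is created, this mask
 * is ANDed with the host facility list to build the per-VM facility
 * mask, so a facility becomes guest-visible only if the host has it and
 * it is allowed here.  The BUILD_BUG_ON above keeps the array within
 * S390_ARCH_FAC_MASK_SIZE_U64.
 */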
13978c4b59fSMichael Mueller 
14015c9705fSDavid Hildenbrand /* available cpu features supported by kvm */
14115c9705fSDavid Hildenbrand static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
1420a763c78SDavid Hildenbrand /* available subfunctions indicated via query / "test bit" */
1430a763c78SDavid Hildenbrand static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;
14415c9705fSDavid Hildenbrand 
1459d8d5786SMichael Mueller static struct gmap_notifier gmap_notifier;
146a3508fbeSDavid Hildenbrand static struct gmap_notifier vsie_gmap_notifier;
14778f26131SChristian Borntraeger debug_info_t *kvm_s390_dbf;
1489d8d5786SMichael Mueller 
149b0c632dbSHeiko Carstens /* Section: not file related */
15013a34e06SRadim Krčmář int kvm_arch_hardware_enable(void)
151b0c632dbSHeiko Carstens {
152b0c632dbSHeiko Carstens 	/* every s390 is virtualization enabled ;-) */
15310474ae8SAlexander Graf 	return 0;
154b0c632dbSHeiko Carstens }
155b0c632dbSHeiko Carstens 
156414d3b07SMartin Schwidefsky static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
157414d3b07SMartin Schwidefsky 			      unsigned long end);
1582c70fe44SChristian Borntraeger 
159fdf03650SFan Zhang /*
160fdf03650SFan Zhang  * This callback is executed during stop_machine(). All CPUs are therefore
161fdf03650SFan Zhang  * temporarily stopped. In order not to change guest behavior, we have to
162fdf03650SFan Zhang  * disable preemption whenever we touch the epoch of kvm and the VCPUs,
163fdf03650SFan Zhang  * so a CPU won't be stopped while calculating with the epoch.
164fdf03650SFan Zhang  */
165fdf03650SFan Zhang static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
166fdf03650SFan Zhang 			  void *v)
167fdf03650SFan Zhang {
168fdf03650SFan Zhang 	struct kvm *kvm;
169fdf03650SFan Zhang 	struct kvm_vcpu *vcpu;
170fdf03650SFan Zhang 	int i;
171fdf03650SFan Zhang 	unsigned long long *delta = v;
172fdf03650SFan Zhang 
173fdf03650SFan Zhang 	list_for_each_entry(kvm, &vm_list, vm_list) {
174fdf03650SFan Zhang 		kvm->arch.epoch -= *delta;
175fdf03650SFan Zhang 		kvm_for_each_vcpu(i, vcpu, kvm) {
176fdf03650SFan Zhang 			vcpu->arch.sie_block->epoch -= *delta;
177db0758b2SDavid Hildenbrand 			if (vcpu->arch.cputm_enabled)
178db0758b2SDavid Hildenbrand 				vcpu->arch.cputm_start += *delta;
179*91473b48SDavid Hildenbrand 			if (vcpu->arch.vsie_block)
180*91473b48SDavid Hildenbrand 				vcpu->arch.vsie_block->epoch -= *delta;
181fdf03650SFan Zhang 		}
182fdf03650SFan Zhang 	}
183fdf03650SFan Zhang 	return NOTIFY_OK;
184fdf03650SFan Zhang }
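/*
 * The guest-observed TOD clock is host TOD + epoch.  When the clock
 * synchronization code (ETR/STP) steps the host TOD by *delta under
 * stop_machine(), subtracting *delta from every epoch above keeps the
 * guest-visible time unchanged; cputm_start is a raw TOD snapshot used
 * for CPU-timer accounting, so it is shifted forward by the same amount.
 */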
185fdf03650SFan Zhang 
186fdf03650SFan Zhang static struct notifier_block kvm_clock_notifier = {
187fdf03650SFan Zhang 	.notifier_call = kvm_clock_sync,
188fdf03650SFan Zhang };
189fdf03650SFan Zhang 
190b0c632dbSHeiko Carstens int kvm_arch_hardware_setup(void)
191b0c632dbSHeiko Carstens {
1922c70fe44SChristian Borntraeger 	gmap_notifier.notifier_call = kvm_gmap_notifier;
193b2d73b2aSMartin Schwidefsky 	gmap_register_pte_notifier(&gmap_notifier);
194a3508fbeSDavid Hildenbrand 	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
195a3508fbeSDavid Hildenbrand 	gmap_register_pte_notifier(&vsie_gmap_notifier);
196fdf03650SFan Zhang 	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
197fdf03650SFan Zhang 				       &kvm_clock_notifier);
198b0c632dbSHeiko Carstens 	return 0;
199b0c632dbSHeiko Carstens }
200b0c632dbSHeiko Carstens 
201b0c632dbSHeiko Carstens void kvm_arch_hardware_unsetup(void)
202b0c632dbSHeiko Carstens {
203b2d73b2aSMartin Schwidefsky 	gmap_unregister_pte_notifier(&gmap_notifier);
204a3508fbeSDavid Hildenbrand 	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
205fdf03650SFan Zhang 	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
206fdf03650SFan Zhang 					 &kvm_clock_notifier);
207b0c632dbSHeiko Carstens }
208b0c632dbSHeiko Carstens 
20922be5a13SDavid Hildenbrand static void allow_cpu_feat(unsigned long nr)
21022be5a13SDavid Hildenbrand {
21122be5a13SDavid Hildenbrand 	set_bit_inv(nr, kvm_s390_available_cpu_feat);
21222be5a13SDavid Hildenbrand }
21322be5a13SDavid Hildenbrand 
2140a763c78SDavid Hildenbrand static inline int plo_test_bit(unsigned char nr)
2150a763c78SDavid Hildenbrand {
2160a763c78SDavid Hildenbrand 	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
2170a763c78SDavid Hildenbrand 	int cc = 3; /* subfunction not available */
2180a763c78SDavid Hildenbrand 
2190a763c78SDavid Hildenbrand 	asm volatile(
2200a763c78SDavid Hildenbrand 		/* Parameter registers are ignored for "test bit" */
2210a763c78SDavid Hildenbrand 		"	plo	0,0,0,0(0)\n"
2220a763c78SDavid Hildenbrand 		"	ipm	%0\n"
2230a763c78SDavid Hildenbrand 		"	srl	%0,28\n"
2240a763c78SDavid Hildenbrand 		: "=d" (cc)
2250a763c78SDavid Hildenbrand 		: "d" (r0)
2260a763c78SDavid Hildenbrand 		: "cc");
2270a763c78SDavid Hildenbrand 	return cc == 0;
2280a763c78SDavid Hildenbrand }
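/*
 * PLO with bit 0x100 set in the function code performs "test bit" for
 * that function: condition code 0 means the queried PLO subfunction is
 * installed.  The init loop below packs the result MSB-first
 * (0x80 >> (i & 7)) into the query block that is later exported through
 * the CPU subfunction attributes.
 */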
2290a763c78SDavid Hildenbrand 
23022be5a13SDavid Hildenbrand static void kvm_s390_cpu_feat_init(void)
23122be5a13SDavid Hildenbrand {
2320a763c78SDavid Hildenbrand 	int i;
2330a763c78SDavid Hildenbrand 
2340a763c78SDavid Hildenbrand 	for (i = 0; i < 256; ++i) {
2350a763c78SDavid Hildenbrand 		if (plo_test_bit(i))
2360a763c78SDavid Hildenbrand 			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
2370a763c78SDavid Hildenbrand 	}
2380a763c78SDavid Hildenbrand 
2390a763c78SDavid Hildenbrand 	if (test_facility(28)) /* TOD-clock steering */
2400a763c78SDavid Hildenbrand 		etr_ptff(kvm_s390_available_subfunc.ptff, ETR_PTFF_QAF);
2410a763c78SDavid Hildenbrand 
2420a763c78SDavid Hildenbrand 	if (test_facility(17)) { /* MSA */
2430a763c78SDavid Hildenbrand 		__cpacf_query(CPACF_KMAC, kvm_s390_available_subfunc.kmac);
2440a763c78SDavid Hildenbrand 		__cpacf_query(CPACF_KMC, kvm_s390_available_subfunc.kmc);
2450a763c78SDavid Hildenbrand 		__cpacf_query(CPACF_KM, kvm_s390_available_subfunc.km);
2460a763c78SDavid Hildenbrand 		__cpacf_query(CPACF_KIMD, kvm_s390_available_subfunc.kimd);
2470a763c78SDavid Hildenbrand 		__cpacf_query(CPACF_KLMD, kvm_s390_available_subfunc.klmd);
2480a763c78SDavid Hildenbrand 	}
2490a763c78SDavid Hildenbrand 	if (test_facility(76)) /* MSA3 */
2500a763c78SDavid Hildenbrand 		__cpacf_query(CPACF_PCKMO, kvm_s390_available_subfunc.pckmo);
2510a763c78SDavid Hildenbrand 	if (test_facility(77)) { /* MSA4 */
2520a763c78SDavid Hildenbrand 		__cpacf_query(CPACF_KMCTR, kvm_s390_available_subfunc.kmctr);
2530a763c78SDavid Hildenbrand 		__cpacf_query(CPACF_KMF, kvm_s390_available_subfunc.kmf);
2540a763c78SDavid Hildenbrand 		__cpacf_query(CPACF_KMO, kvm_s390_available_subfunc.kmo);
2550a763c78SDavid Hildenbrand 		__cpacf_query(CPACF_PCC, kvm_s390_available_subfunc.pcc);
2560a763c78SDavid Hildenbrand 	}
2570a763c78SDavid Hildenbrand 	if (test_facility(57)) /* MSA5 */
2580a763c78SDavid Hildenbrand 		__cpacf_query(CPACF_PPNO, kvm_s390_available_subfunc.ppno);
2590a763c78SDavid Hildenbrand 
26022be5a13SDavid Hildenbrand 	if (MACHINE_HAS_ESOP)
26122be5a13SDavid Hildenbrand 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
262a3508fbeSDavid Hildenbrand 	/*
263a3508fbeSDavid Hildenbrand 	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
264a3508fbeSDavid Hildenbrand 	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
265a3508fbeSDavid Hildenbrand 	 */
266a3508fbeSDavid Hildenbrand 	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
267a3508fbeSDavid Hildenbrand 	    !test_facility(3))
268a3508fbeSDavid Hildenbrand 		return;
269a3508fbeSDavid Hildenbrand 	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
27019c439b5SDavid Hildenbrand 	if (sclp.has_64bscao)
27119c439b5SDavid Hildenbrand 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
2720615a326SDavid Hildenbrand 	if (sclp.has_siif)
2730615a326SDavid Hildenbrand 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
27477d18f6dSDavid Hildenbrand 	if (sclp.has_gpere)
27577d18f6dSDavid Hildenbrand 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
276a1b7b9b2SDavid Hildenbrand 	if (sclp.has_gsls)
277a1b7b9b2SDavid Hildenbrand 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
2785630a8e8SDavid Hildenbrand 	if (sclp.has_ib)
2795630a8e8SDavid Hildenbrand 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
28013ee3f67SDavid Hildenbrand 	if (sclp.has_cei)
28113ee3f67SDavid Hildenbrand 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
2827fd7f39dSDavid Hildenbrand 	if (sclp.has_ibs)
2837fd7f39dSDavid Hildenbrand 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
28422be5a13SDavid Hildenbrand }
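/*
 * The feature bits and subfunction query results collected above are
 * what kvm_s390_get_machine_feat() and kvm_s390_get_machine_subfunc()
 * later report to userspace through the KVM_S390_VM_CPU_MODEL device
 * attributes, i.e. they describe what this host can offer to guests.
 */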
28522be5a13SDavid Hildenbrand 
286b0c632dbSHeiko Carstens int kvm_arch_init(void *opaque)
287b0c632dbSHeiko Carstens {
28878f26131SChristian Borntraeger 	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
28978f26131SChristian Borntraeger 	if (!kvm_s390_dbf)
29078f26131SChristian Borntraeger 		return -ENOMEM;
29178f26131SChristian Borntraeger 
29278f26131SChristian Borntraeger 	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
29378f26131SChristian Borntraeger 		debug_unregister(kvm_s390_dbf);
29478f26131SChristian Borntraeger 		return -ENOMEM;
29578f26131SChristian Borntraeger 	}
29678f26131SChristian Borntraeger 
29722be5a13SDavid Hildenbrand 	kvm_s390_cpu_feat_init();
29822be5a13SDavid Hildenbrand 
29984877d93SCornelia Huck 	/* Register floating interrupt controller interface. */
30084877d93SCornelia Huck 	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
301b0c632dbSHeiko Carstens }
302b0c632dbSHeiko Carstens 
30378f26131SChristian Borntraeger void kvm_arch_exit(void)
30478f26131SChristian Borntraeger {
30578f26131SChristian Borntraeger 	debug_unregister(kvm_s390_dbf);
30678f26131SChristian Borntraeger }
30778f26131SChristian Borntraeger 
308b0c632dbSHeiko Carstens /* Section: device related */
309b0c632dbSHeiko Carstens long kvm_arch_dev_ioctl(struct file *filp,
310b0c632dbSHeiko Carstens 			unsigned int ioctl, unsigned long arg)
311b0c632dbSHeiko Carstens {
312b0c632dbSHeiko Carstens 	if (ioctl == KVM_S390_ENABLE_SIE)
313b0c632dbSHeiko Carstens 		return s390_enable_sie();
314b0c632dbSHeiko Carstens 	return -EINVAL;
315b0c632dbSHeiko Carstens }
316b0c632dbSHeiko Carstens 
317784aa3d7SAlexander Graf int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
318b0c632dbSHeiko Carstens {
319d7b0b5ebSCarsten Otte 	int r;
320d7b0b5ebSCarsten Otte 
3212bd0ac4eSCarsten Otte 	switch (ext) {
322d7b0b5ebSCarsten Otte 	case KVM_CAP_S390_PSW:
323b6cf8788SChristian Borntraeger 	case KVM_CAP_S390_GMAP:
32452e16b18SChristian Borntraeger 	case KVM_CAP_SYNC_MMU:
3251efd0f59SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
3261efd0f59SCarsten Otte 	case KVM_CAP_S390_UCONTROL:
3271efd0f59SCarsten Otte #endif
3283c038e6bSDominik Dingel 	case KVM_CAP_ASYNC_PF:
32960b413c9SChristian Borntraeger 	case KVM_CAP_SYNC_REGS:
33014eebd91SCarsten Otte 	case KVM_CAP_ONE_REG:
331d6712df9SCornelia Huck 	case KVM_CAP_ENABLE_CAP:
332fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
33310ccaa1eSCornelia Huck 	case KVM_CAP_IOEVENTFD:
334c05c4186SJens Freimann 	case KVM_CAP_DEVICE_CTRL:
335d938dc55SCornelia Huck 	case KVM_CAP_ENABLE_CAP_VM:
33678599d90SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
337f2061656SDominik Dingel 	case KVM_CAP_VM_ATTRIBUTES:
3386352e4d2SDavid Hildenbrand 	case KVM_CAP_MP_STATE:
33947b43c52SJens Freimann 	case KVM_CAP_S390_INJECT_IRQ:
3402444b352SDavid Hildenbrand 	case KVM_CAP_S390_USER_SIGP:
341e44fc8c9SEkaterina Tumanova 	case KVM_CAP_S390_USER_STSI:
34230ee2a98SJason J. Herne 	case KVM_CAP_S390_SKEYS:
343816c7667SJens Freimann 	case KVM_CAP_S390_IRQ_STATE:
344d7b0b5ebSCarsten Otte 		r = 1;
345d7b0b5ebSCarsten Otte 		break;
34641408c28SThomas Huth 	case KVM_CAP_S390_MEM_OP:
34741408c28SThomas Huth 		r = MEM_OP_MAX_SIZE;
34841408c28SThomas Huth 		break;
349e726b1bdSChristian Borntraeger 	case KVM_CAP_NR_VCPUS:
350e726b1bdSChristian Borntraeger 	case KVM_CAP_MAX_VCPUS:
35176a6dd72SDavid Hildenbrand 		r = KVM_S390_BSCA_CPU_SLOTS;
35276a6dd72SDavid Hildenbrand 		if (sclp.has_esca && sclp.has_64bscao)
35376a6dd72SDavid Hildenbrand 			r = KVM_S390_ESCA_CPU_SLOTS;
354e726b1bdSChristian Borntraeger 		break;
355e1e2e605SNick Wang 	case KVM_CAP_NR_MEMSLOTS:
356e1e2e605SNick Wang 		r = KVM_USER_MEM_SLOTS;
357e1e2e605SNick Wang 		break;
3581526bf9cSChristian Borntraeger 	case KVM_CAP_S390_COW:
359abf09bedSMartin Schwidefsky 		r = MACHINE_HAS_ESOP;
3601526bf9cSChristian Borntraeger 		break;
36168c55750SEric Farman 	case KVM_CAP_S390_VECTOR_REGISTERS:
36268c55750SEric Farman 		r = MACHINE_HAS_VX;
36368c55750SEric Farman 		break;
364c6e5f166SFan Zhang 	case KVM_CAP_S390_RI:
365c6e5f166SFan Zhang 		r = test_facility(64);
366c6e5f166SFan Zhang 		break;
3672bd0ac4eSCarsten Otte 	default:
368d7b0b5ebSCarsten Otte 		r = 0;
369b0c632dbSHeiko Carstens 	}
370d7b0b5ebSCarsten Otte 	return r;
3712bd0ac4eSCarsten Otte }
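/*
 * Illustrative userspace check (sketch, not part of this file):
 *
 *	int ret = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_MEM_OP);
 *
 * Plain capabilities handled above return 1, unsupported ones return 0,
 * and value-carrying ones return the value itself, e.g. MEM_OP_MAX_SIZE
 * for KVM_CAP_S390_MEM_OP or the vcpu slot limit for KVM_CAP_MAX_VCPUS.
 */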
372b0c632dbSHeiko Carstens 
37315f36ebdSJason J. Herne static void kvm_s390_sync_dirty_log(struct kvm *kvm,
37415f36ebdSJason J. Herne 					struct kvm_memory_slot *memslot)
37515f36ebdSJason J. Herne {
37615f36ebdSJason J. Herne 	gfn_t cur_gfn, last_gfn;
37715f36ebdSJason J. Herne 	unsigned long address;
37815f36ebdSJason J. Herne 	struct gmap *gmap = kvm->arch.gmap;
37915f36ebdSJason J. Herne 
38015f36ebdSJason J. Herne 	/* Loop over all guest pages */
38115f36ebdSJason J. Herne 	last_gfn = memslot->base_gfn + memslot->npages;
38215f36ebdSJason J. Herne 	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
38315f36ebdSJason J. Herne 		address = gfn_to_hva_memslot(memslot, cur_gfn);
38415f36ebdSJason J. Herne 
3851e133ab2SMartin Schwidefsky 		if (test_and_clear_guest_dirty(gmap->mm, address))
38615f36ebdSJason J. Herne 			mark_page_dirty(kvm, cur_gfn);
3871763f8d0SChristian Borntraeger 		if (fatal_signal_pending(current))
3881763f8d0SChristian Borntraeger 			return;
38970c88a00SChristian Borntraeger 		cond_resched();
39015f36ebdSJason J. Herne 	}
39115f36ebdSJason J. Herne }
39215f36ebdSJason J. Herne 
393b0c632dbSHeiko Carstens /* Section: vm related */
394a6e2f683SEugene (jno) Dvurechenski static void sca_del_vcpu(struct kvm_vcpu *vcpu);
395a6e2f683SEugene (jno) Dvurechenski 
396b0c632dbSHeiko Carstens /*
397b0c632dbSHeiko Carstens  * Get (and clear) the dirty memory log for a memory slot.
398b0c632dbSHeiko Carstens  */
399b0c632dbSHeiko Carstens int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
400b0c632dbSHeiko Carstens 			       struct kvm_dirty_log *log)
401b0c632dbSHeiko Carstens {
40215f36ebdSJason J. Herne 	int r;
40315f36ebdSJason J. Herne 	unsigned long n;
4049f6b8029SPaolo Bonzini 	struct kvm_memslots *slots;
40515f36ebdSJason J. Herne 	struct kvm_memory_slot *memslot;
40615f36ebdSJason J. Herne 	int is_dirty = 0;
40715f36ebdSJason J. Herne 
40815f36ebdSJason J. Herne 	mutex_lock(&kvm->slots_lock);
40915f36ebdSJason J. Herne 
41015f36ebdSJason J. Herne 	r = -EINVAL;
41115f36ebdSJason J. Herne 	if (log->slot >= KVM_USER_MEM_SLOTS)
41215f36ebdSJason J. Herne 		goto out;
41315f36ebdSJason J. Herne 
4149f6b8029SPaolo Bonzini 	slots = kvm_memslots(kvm);
4159f6b8029SPaolo Bonzini 	memslot = id_to_memslot(slots, log->slot);
41615f36ebdSJason J. Herne 	r = -ENOENT;
41715f36ebdSJason J. Herne 	if (!memslot->dirty_bitmap)
41815f36ebdSJason J. Herne 		goto out;
41915f36ebdSJason J. Herne 
42015f36ebdSJason J. Herne 	kvm_s390_sync_dirty_log(kvm, memslot);
42115f36ebdSJason J. Herne 	r = kvm_get_dirty_log(kvm, log, &is_dirty);
42215f36ebdSJason J. Herne 	if (r)
42315f36ebdSJason J. Herne 		goto out;
42415f36ebdSJason J. Herne 
42515f36ebdSJason J. Herne 	/* Clear the dirty log */
42615f36ebdSJason J. Herne 	if (is_dirty) {
42715f36ebdSJason J. Herne 		n = kvm_dirty_bitmap_bytes(memslot);
42815f36ebdSJason J. Herne 		memset(memslot->dirty_bitmap, 0, n);
42915f36ebdSJason J. Herne 	}
43015f36ebdSJason J. Herne 	r = 0;
43115f36ebdSJason J. Herne out:
43215f36ebdSJason J. Herne 	mutex_unlock(&kvm->slots_lock);
43315f36ebdSJason J. Herne 	return r;
434b0c632dbSHeiko Carstens }
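/*
 * Dirty-log protocol: kvm_s390_sync_dirty_log() transfers the per-page
 * dirty state tracked by the gmap into the memslot dirty bitmap,
 * kvm_get_dirty_log() copies that bitmap to userspace, and the bitmap
 * is then cleared, so each KVM_GET_DIRTY_LOG call reports only pages
 * dirtied since the previous call.
 */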
435b0c632dbSHeiko Carstens 
436d938dc55SCornelia Huck static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
437d938dc55SCornelia Huck {
438d938dc55SCornelia Huck 	int r;
439d938dc55SCornelia Huck 
440d938dc55SCornelia Huck 	if (cap->flags)
441d938dc55SCornelia Huck 		return -EINVAL;
442d938dc55SCornelia Huck 
443d938dc55SCornelia Huck 	switch (cap->cap) {
44484223598SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
445c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
44684223598SCornelia Huck 		kvm->arch.use_irqchip = 1;
44784223598SCornelia Huck 		r = 0;
44884223598SCornelia Huck 		break;
4492444b352SDavid Hildenbrand 	case KVM_CAP_S390_USER_SIGP:
450c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
4512444b352SDavid Hildenbrand 		kvm->arch.user_sigp = 1;
4522444b352SDavid Hildenbrand 		r = 0;
4532444b352SDavid Hildenbrand 		break;
45468c55750SEric Farman 	case KVM_CAP_S390_VECTOR_REGISTERS:
4555967c17bSDavid Hildenbrand 		mutex_lock(&kvm->lock);
456a03825bbSPaolo Bonzini 		if (kvm->created_vcpus) {
4575967c17bSDavid Hildenbrand 			r = -EBUSY;
4585967c17bSDavid Hildenbrand 		} else if (MACHINE_HAS_VX) {
459c54f0d6aSDavid Hildenbrand 			set_kvm_facility(kvm->arch.model.fac_mask, 129);
460c54f0d6aSDavid Hildenbrand 			set_kvm_facility(kvm->arch.model.fac_list, 129);
46118280d8bSMichael Mueller 			r = 0;
46218280d8bSMichael Mueller 		} else
46318280d8bSMichael Mueller 			r = -EINVAL;
4645967c17bSDavid Hildenbrand 		mutex_unlock(&kvm->lock);
465c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
466c92ea7b9SChristian Borntraeger 			 r ? "(not available)" : "(success)");
46768c55750SEric Farman 		break;
468c6e5f166SFan Zhang 	case KVM_CAP_S390_RI:
469c6e5f166SFan Zhang 		r = -EINVAL;
470c6e5f166SFan Zhang 		mutex_lock(&kvm->lock);
471a03825bbSPaolo Bonzini 		if (kvm->created_vcpus) {
472c6e5f166SFan Zhang 			r = -EBUSY;
473c6e5f166SFan Zhang 		} else if (test_facility(64)) {
474c54f0d6aSDavid Hildenbrand 			set_kvm_facility(kvm->arch.model.fac_mask, 64);
475c54f0d6aSDavid Hildenbrand 			set_kvm_facility(kvm->arch.model.fac_list, 64);
476c6e5f166SFan Zhang 			r = 0;
477c6e5f166SFan Zhang 		}
478c6e5f166SFan Zhang 		mutex_unlock(&kvm->lock);
479c6e5f166SFan Zhang 		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
480c6e5f166SFan Zhang 			 r ? "(not available)" : "(success)");
481c6e5f166SFan Zhang 		break;
482e44fc8c9SEkaterina Tumanova 	case KVM_CAP_S390_USER_STSI:
483c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
484e44fc8c9SEkaterina Tumanova 		kvm->arch.user_stsi = 1;
485e44fc8c9SEkaterina Tumanova 		r = 0;
486e44fc8c9SEkaterina Tumanova 		break;
487d938dc55SCornelia Huck 	default:
488d938dc55SCornelia Huck 		r = -EINVAL;
489d938dc55SCornelia Huck 		break;
490d938dc55SCornelia Huck 	}
491d938dc55SCornelia Huck 	return r;
492d938dc55SCornelia Huck }
493d938dc55SCornelia Huck 
4948c0a7ce6SDominik Dingel static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
4958c0a7ce6SDominik Dingel {
4968c0a7ce6SDominik Dingel 	int ret;
4978c0a7ce6SDominik Dingel 
4988c0a7ce6SDominik Dingel 	switch (attr->attr) {
4998c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE:
5008c0a7ce6SDominik Dingel 		ret = 0;
501c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
502a3a92c31SDominik Dingel 			 kvm->arch.mem_limit);
503a3a92c31SDominik Dingel 		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
5048c0a7ce6SDominik Dingel 			ret = -EFAULT;
5058c0a7ce6SDominik Dingel 		break;
5068c0a7ce6SDominik Dingel 	default:
5078c0a7ce6SDominik Dingel 		ret = -ENXIO;
5088c0a7ce6SDominik Dingel 		break;
5098c0a7ce6SDominik Dingel 	}
5108c0a7ce6SDominik Dingel 	return ret;
5118c0a7ce6SDominik Dingel }
5128c0a7ce6SDominik Dingel 
5138c0a7ce6SDominik Dingel static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
5144f718eabSDominik Dingel {
5154f718eabSDominik Dingel 	int ret;
5164f718eabSDominik Dingel 	unsigned int idx;
5174f718eabSDominik Dingel 	switch (attr->attr) {
5184f718eabSDominik Dingel 	case KVM_S390_VM_MEM_ENABLE_CMMA:
519f9cbd9b0SDavid Hildenbrand 		ret = -ENXIO;
520c24cc9c8SDavid Hildenbrand 		if (!sclp.has_cmma)
521e6db1d61SDominik Dingel 			break;
522e6db1d61SDominik Dingel 
5234f718eabSDominik Dingel 		ret = -EBUSY;
524c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
5254f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
526a03825bbSPaolo Bonzini 		if (!kvm->created_vcpus) {
5274f718eabSDominik Dingel 			kvm->arch.use_cmma = 1;
5284f718eabSDominik Dingel 			ret = 0;
5294f718eabSDominik Dingel 		}
5304f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
5314f718eabSDominik Dingel 		break;
5324f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CLR_CMMA:
533f9cbd9b0SDavid Hildenbrand 		ret = -ENXIO;
534f9cbd9b0SDavid Hildenbrand 		if (!sclp.has_cmma)
535f9cbd9b0SDavid Hildenbrand 			break;
536c3489155SDominik Dingel 		ret = -EINVAL;
537c3489155SDominik Dingel 		if (!kvm->arch.use_cmma)
538c3489155SDominik Dingel 			break;
539c3489155SDominik Dingel 
540c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
5414f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
5424f718eabSDominik Dingel 		idx = srcu_read_lock(&kvm->srcu);
543a13cff31SDominik Dingel 		s390_reset_cmma(kvm->arch.gmap->mm);
5444f718eabSDominik Dingel 		srcu_read_unlock(&kvm->srcu, idx);
5454f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
5464f718eabSDominik Dingel 		ret = 0;
5474f718eabSDominik Dingel 		break;
5488c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE: {
5498c0a7ce6SDominik Dingel 		unsigned long new_limit;
5508c0a7ce6SDominik Dingel 
5518c0a7ce6SDominik Dingel 		if (kvm_is_ucontrol(kvm))
5528c0a7ce6SDominik Dingel 			return -EINVAL;
5538c0a7ce6SDominik Dingel 
5548c0a7ce6SDominik Dingel 		if (get_user(new_limit, (u64 __user *)attr->addr))
5558c0a7ce6SDominik Dingel 			return -EFAULT;
5568c0a7ce6SDominik Dingel 
557a3a92c31SDominik Dingel 		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
558a3a92c31SDominik Dingel 		    new_limit > kvm->arch.mem_limit)
5598c0a7ce6SDominik Dingel 			return -E2BIG;
5608c0a7ce6SDominik Dingel 
561a3a92c31SDominik Dingel 		if (!new_limit)
562a3a92c31SDominik Dingel 			return -EINVAL;
563a3a92c31SDominik Dingel 
5646ea427bbSMartin Schwidefsky 		/* gmap_create takes last usable address */
565a3a92c31SDominik Dingel 		if (new_limit != KVM_S390_NO_MEM_LIMIT)
566a3a92c31SDominik Dingel 			new_limit -= 1;
567a3a92c31SDominik Dingel 
5688c0a7ce6SDominik Dingel 		ret = -EBUSY;
5698c0a7ce6SDominik Dingel 		mutex_lock(&kvm->lock);
570a03825bbSPaolo Bonzini 		if (!kvm->created_vcpus) {
5716ea427bbSMartin Schwidefsky 			/* gmap_create will round the limit up */
5726ea427bbSMartin Schwidefsky 			struct gmap *new = gmap_create(current->mm, new_limit);
5738c0a7ce6SDominik Dingel 
5748c0a7ce6SDominik Dingel 			if (!new) {
5758c0a7ce6SDominik Dingel 				ret = -ENOMEM;
5768c0a7ce6SDominik Dingel 			} else {
5776ea427bbSMartin Schwidefsky 				gmap_remove(kvm->arch.gmap);
5788c0a7ce6SDominik Dingel 				new->private = kvm;
5798c0a7ce6SDominik Dingel 				kvm->arch.gmap = new;
5808c0a7ce6SDominik Dingel 				ret = 0;
5818c0a7ce6SDominik Dingel 			}
5828c0a7ce6SDominik Dingel 		}
5838c0a7ce6SDominik Dingel 		mutex_unlock(&kvm->lock);
584a3a92c31SDominik Dingel 		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
585a3a92c31SDominik Dingel 		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
586a3a92c31SDominik Dingel 			 (void *) kvm->arch.gmap->asce);
5878c0a7ce6SDominik Dingel 		break;
5888c0a7ce6SDominik Dingel 	}
5894f718eabSDominik Dingel 	default:
5904f718eabSDominik Dingel 		ret = -ENXIO;
5914f718eabSDominik Dingel 		break;
5924f718eabSDominik Dingel 	}
5934f718eabSDominik Dingel 	return ret;
5944f718eabSDominik Dingel }
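/*
 * CMMA (collaborative memory management, driven by the guest via ESSA)
 * may only be enabled while no vcpus exist yet, since it influences how
 * vcpus are set up; KVM_S390_VM_MEM_CLR_CMMA resets the per-page CMMA
 * state with s390_reset_cmma().  Likewise, the memory limit can only be
 * changed before the first vcpu is created, by replacing the gmap.
 */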
5954f718eabSDominik Dingel 
596a374e892STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
597a374e892STony Krowiak 
598a374e892STony Krowiak static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
599a374e892STony Krowiak {
600a374e892STony Krowiak 	struct kvm_vcpu *vcpu;
601a374e892STony Krowiak 	int i;
602a374e892STony Krowiak 
6039d8d5786SMichael Mueller 	if (!test_kvm_facility(kvm, 76))
604a374e892STony Krowiak 		return -EINVAL;
605a374e892STony Krowiak 
606a374e892STony Krowiak 	mutex_lock(&kvm->lock);
607a374e892STony Krowiak 	switch (attr->attr) {
608a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
609a374e892STony Krowiak 		get_random_bytes(
610a374e892STony Krowiak 			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
611a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
612a374e892STony Krowiak 		kvm->arch.crypto.aes_kw = 1;
613c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
614a374e892STony Krowiak 		break;
615a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
616a374e892STony Krowiak 		get_random_bytes(
617a374e892STony Krowiak 			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
618a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
619a374e892STony Krowiak 		kvm->arch.crypto.dea_kw = 1;
620c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
621a374e892STony Krowiak 		break;
622a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
623a374e892STony Krowiak 		kvm->arch.crypto.aes_kw = 0;
624a374e892STony Krowiak 		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
625a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
626c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
627a374e892STony Krowiak 		break;
628a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
629a374e892STony Krowiak 		kvm->arch.crypto.dea_kw = 0;
630a374e892STony Krowiak 		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
631a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
632c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
633a374e892STony Krowiak 		break;
634a374e892STony Krowiak 	default:
635a374e892STony Krowiak 		mutex_unlock(&kvm->lock);
636a374e892STony Krowiak 		return -ENXIO;
637a374e892STony Krowiak 	}
638a374e892STony Krowiak 
639a374e892STony Krowiak 	kvm_for_each_vcpu(i, vcpu, kvm) {
640a374e892STony Krowiak 		kvm_s390_vcpu_crypto_setup(vcpu);
641a374e892STony Krowiak 		exit_sie(vcpu);
642a374e892STony Krowiak 	}
643a374e892STony Krowiak 	mutex_unlock(&kvm->lock);
644a374e892STony Krowiak 	return 0;
645a374e892STony Krowiak }
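/*
 * Enabling AES/DEA key wrapping installs a freshly generated random
 * wrapping-key mask in the shared crypto control block; the loop above
 * calls kvm_s390_vcpu_crypto_setup() for every vcpu and kicks it out of
 * SIE so the new settings take effect before the guest runs again.
 */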
646a374e892STony Krowiak 
64772f25020SJason J. Herne static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
64872f25020SJason J. Herne {
64972f25020SJason J. Herne 	u8 gtod_high;
65072f25020SJason J. Herne 
65172f25020SJason J. Herne 	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
65272f25020SJason J. Herne 					   sizeof(gtod_high)))
65372f25020SJason J. Herne 		return -EFAULT;
65472f25020SJason J. Herne 
65572f25020SJason J. Herne 	if (gtod_high != 0)
65672f25020SJason J. Herne 		return -EINVAL;
65758c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
65872f25020SJason J. Herne 
65972f25020SJason J. Herne 	return 0;
66072f25020SJason J. Herne }
66172f25020SJason J. Herne 
66272f25020SJason J. Herne static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
66372f25020SJason J. Herne {
6645a3d883aSDavid Hildenbrand 	u64 gtod;
66572f25020SJason J. Herne 
66672f25020SJason J. Herne 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
66772f25020SJason J. Herne 		return -EFAULT;
66872f25020SJason J. Herne 
66925ed1675SDavid Hildenbrand 	kvm_s390_set_tod_clock(kvm, gtod);
67058c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
67172f25020SJason J. Herne 	return 0;
67272f25020SJason J. Herne }
67372f25020SJason J. Herne 
67472f25020SJason J. Herne static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
67572f25020SJason J. Herne {
67672f25020SJason J. Herne 	int ret;
67772f25020SJason J. Herne 
67872f25020SJason J. Herne 	if (attr->flags)
67972f25020SJason J. Herne 		return -EINVAL;
68072f25020SJason J. Herne 
68172f25020SJason J. Herne 	switch (attr->attr) {
68272f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
68372f25020SJason J. Herne 		ret = kvm_s390_set_tod_high(kvm, attr);
68472f25020SJason J. Herne 		break;
68572f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
68672f25020SJason J. Herne 		ret = kvm_s390_set_tod_low(kvm, attr);
68772f25020SJason J. Herne 		break;
68872f25020SJason J. Herne 	default:
68972f25020SJason J. Herne 		ret = -ENXIO;
69072f25020SJason J. Herne 		break;
69172f25020SJason J. Herne 	}
69272f25020SJason J. Herne 	return ret;
69372f25020SJason J. Herne }
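/*
 * Only the low 64 bits of the TOD clock are configurable here: the
 * TOD-extension byte (KVM_S390_VM_TOD_HIGH) must be zero.  Setting the
 * low part goes through kvm_s390_set_tod_clock(), which recomputes the
 * epoch so that all vcpus observe the requested guest time.
 */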
69472f25020SJason J. Herne 
69572f25020SJason J. Herne static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
69672f25020SJason J. Herne {
69772f25020SJason J. Herne 	u8 gtod_high = 0;
69872f25020SJason J. Herne 
69972f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod_high,
70072f25020SJason J. Herne 					 sizeof(gtod_high)))
70172f25020SJason J. Herne 		return -EFAULT;
70258c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
70372f25020SJason J. Herne 
70472f25020SJason J. Herne 	return 0;
70572f25020SJason J. Herne }
70672f25020SJason J. Herne 
70772f25020SJason J. Herne static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
70872f25020SJason J. Herne {
7095a3d883aSDavid Hildenbrand 	u64 gtod;
71072f25020SJason J. Herne 
71160417fccSDavid Hildenbrand 	gtod = kvm_s390_get_tod_clock_fast(kvm);
71272f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
71372f25020SJason J. Herne 		return -EFAULT;
71458c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
71572f25020SJason J. Herne 
71672f25020SJason J. Herne 	return 0;
71772f25020SJason J. Herne }
71872f25020SJason J. Herne 
71972f25020SJason J. Herne static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
72072f25020SJason J. Herne {
72172f25020SJason J. Herne 	int ret;
72272f25020SJason J. Herne 
72372f25020SJason J. Herne 	if (attr->flags)
72472f25020SJason J. Herne 		return -EINVAL;
72572f25020SJason J. Herne 
72672f25020SJason J. Herne 	switch (attr->attr) {
72772f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
72872f25020SJason J. Herne 		ret = kvm_s390_get_tod_high(kvm, attr);
72972f25020SJason J. Herne 		break;
73072f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
73172f25020SJason J. Herne 		ret = kvm_s390_get_tod_low(kvm, attr);
73272f25020SJason J. Herne 		break;
73372f25020SJason J. Herne 	default:
73472f25020SJason J. Herne 		ret = -ENXIO;
73572f25020SJason J. Herne 		break;
73672f25020SJason J. Herne 	}
73772f25020SJason J. Herne 	return ret;
73872f25020SJason J. Herne }
73972f25020SJason J. Herne 
740658b6edaSMichael Mueller static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
741658b6edaSMichael Mueller {
742658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_processor *proc;
743053dd230SDavid Hildenbrand 	u16 lowest_ibc, unblocked_ibc;
744658b6edaSMichael Mueller 	int ret = 0;
745658b6edaSMichael Mueller 
746658b6edaSMichael Mueller 	mutex_lock(&kvm->lock);
747a03825bbSPaolo Bonzini 	if (kvm->created_vcpus) {
748658b6edaSMichael Mueller 		ret = -EBUSY;
749658b6edaSMichael Mueller 		goto out;
750658b6edaSMichael Mueller 	}
751658b6edaSMichael Mueller 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
752658b6edaSMichael Mueller 	if (!proc) {
753658b6edaSMichael Mueller 		ret = -ENOMEM;
754658b6edaSMichael Mueller 		goto out;
755658b6edaSMichael Mueller 	}
756658b6edaSMichael Mueller 	if (!copy_from_user(proc, (void __user *)attr->addr,
757658b6edaSMichael Mueller 			    sizeof(*proc))) {
7589bb0ec09SDavid Hildenbrand 		kvm->arch.model.cpuid = proc->cpuid;
759053dd230SDavid Hildenbrand 		lowest_ibc = sclp.ibc >> 16 & 0xfff;
760053dd230SDavid Hildenbrand 		unblocked_ibc = sclp.ibc & 0xfff;
761053dd230SDavid Hildenbrand 		if (lowest_ibc) {
762053dd230SDavid Hildenbrand 			if (proc->ibc > unblocked_ibc)
763053dd230SDavid Hildenbrand 				kvm->arch.model.ibc = unblocked_ibc;
764053dd230SDavid Hildenbrand 			else if (proc->ibc < lowest_ibc)
765053dd230SDavid Hildenbrand 				kvm->arch.model.ibc = lowest_ibc;
766053dd230SDavid Hildenbrand 			else
767658b6edaSMichael Mueller 				kvm->arch.model.ibc = proc->ibc;
768053dd230SDavid Hildenbrand 		}
769c54f0d6aSDavid Hildenbrand 		memcpy(kvm->arch.model.fac_list, proc->fac_list,
770658b6edaSMichael Mueller 		       S390_ARCH_FAC_LIST_SIZE_BYTE);
771658b6edaSMichael Mueller 	} else
772658b6edaSMichael Mueller 		ret = -EFAULT;
773658b6edaSMichael Mueller 	kfree(proc);
774658b6edaSMichael Mueller out:
775658b6edaSMichael Mueller 	mutex_unlock(&kvm->lock);
776658b6edaSMichael Mueller 	return ret;
777658b6edaSMichael Mueller }
778658b6edaSMichael Mueller 
77915c9705fSDavid Hildenbrand static int kvm_s390_set_processor_feat(struct kvm *kvm,
78015c9705fSDavid Hildenbrand 				       struct kvm_device_attr *attr)
78115c9705fSDavid Hildenbrand {
78215c9705fSDavid Hildenbrand 	struct kvm_s390_vm_cpu_feat data;
78315c9705fSDavid Hildenbrand 	int ret = -EBUSY;
78415c9705fSDavid Hildenbrand 
78515c9705fSDavid Hildenbrand 	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
78615c9705fSDavid Hildenbrand 		return -EFAULT;
78715c9705fSDavid Hildenbrand 	if (!bitmap_subset((unsigned long *) data.feat,
78815c9705fSDavid Hildenbrand 			   kvm_s390_available_cpu_feat,
78915c9705fSDavid Hildenbrand 			   KVM_S390_VM_CPU_FEAT_NR_BITS))
79015c9705fSDavid Hildenbrand 		return -EINVAL;
79115c9705fSDavid Hildenbrand 
79215c9705fSDavid Hildenbrand 	mutex_lock(&kvm->lock);
79315c9705fSDavid Hildenbrand 	if (!atomic_read(&kvm->online_vcpus)) {
79415c9705fSDavid Hildenbrand 		bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
79515c9705fSDavid Hildenbrand 			    KVM_S390_VM_CPU_FEAT_NR_BITS);
79615c9705fSDavid Hildenbrand 		ret = 0;
79715c9705fSDavid Hildenbrand 	}
79815c9705fSDavid Hildenbrand 	mutex_unlock(&kvm->lock);
79915c9705fSDavid Hildenbrand 	return ret;
80015c9705fSDavid Hildenbrand }
80115c9705fSDavid Hildenbrand 
8020a763c78SDavid Hildenbrand static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
8030a763c78SDavid Hildenbrand 					  struct kvm_device_attr *attr)
8040a763c78SDavid Hildenbrand {
8050a763c78SDavid Hildenbrand 	/*
8060a763c78SDavid Hildenbrand 	 * Once supported by kernel + hw, we have to store the subfunctions
8070a763c78SDavid Hildenbrand 	 * in kvm->arch and remember that user space configured them.
8080a763c78SDavid Hildenbrand 	 */
8090a763c78SDavid Hildenbrand 	return -ENXIO;
8100a763c78SDavid Hildenbrand }
8110a763c78SDavid Hildenbrand 
812658b6edaSMichael Mueller static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
813658b6edaSMichael Mueller {
814658b6edaSMichael Mueller 	int ret = -ENXIO;
815658b6edaSMichael Mueller 
816658b6edaSMichael Mueller 	switch (attr->attr) {
817658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
818658b6edaSMichael Mueller 		ret = kvm_s390_set_processor(kvm, attr);
819658b6edaSMichael Mueller 		break;
82015c9705fSDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
82115c9705fSDavid Hildenbrand 		ret = kvm_s390_set_processor_feat(kvm, attr);
82215c9705fSDavid Hildenbrand 		break;
8230a763c78SDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
8240a763c78SDavid Hildenbrand 		ret = kvm_s390_set_processor_subfunc(kvm, attr);
8250a763c78SDavid Hildenbrand 		break;
826658b6edaSMichael Mueller 	}
827658b6edaSMichael Mueller 	return ret;
828658b6edaSMichael Mueller }
829658b6edaSMichael Mueller 
830658b6edaSMichael Mueller static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
831658b6edaSMichael Mueller {
832658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_processor *proc;
833658b6edaSMichael Mueller 	int ret = 0;
834658b6edaSMichael Mueller 
835658b6edaSMichael Mueller 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
836658b6edaSMichael Mueller 	if (!proc) {
837658b6edaSMichael Mueller 		ret = -ENOMEM;
838658b6edaSMichael Mueller 		goto out;
839658b6edaSMichael Mueller 	}
8409bb0ec09SDavid Hildenbrand 	proc->cpuid = kvm->arch.model.cpuid;
841658b6edaSMichael Mueller 	proc->ibc = kvm->arch.model.ibc;
842c54f0d6aSDavid Hildenbrand 	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
843c54f0d6aSDavid Hildenbrand 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
844658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
845658b6edaSMichael Mueller 		ret = -EFAULT;
846658b6edaSMichael Mueller 	kfree(proc);
847658b6edaSMichael Mueller out:
848658b6edaSMichael Mueller 	return ret;
849658b6edaSMichael Mueller }
850658b6edaSMichael Mueller 
851658b6edaSMichael Mueller static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
852658b6edaSMichael Mueller {
853658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_machine *mach;
854658b6edaSMichael Mueller 	int ret = 0;
855658b6edaSMichael Mueller 
856658b6edaSMichael Mueller 	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
857658b6edaSMichael Mueller 	if (!mach) {
858658b6edaSMichael Mueller 		ret = -ENOMEM;
859658b6edaSMichael Mueller 		goto out;
860658b6edaSMichael Mueller 	}
861658b6edaSMichael Mueller 	get_cpu_id((struct cpuid *) &mach->cpuid);
86237c5f6c8SDavid Hildenbrand 	mach->ibc = sclp.ibc;
863c54f0d6aSDavid Hildenbrand 	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
864981467c9SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
865658b6edaSMichael Mueller 	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
86694422ee8SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
867658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
868658b6edaSMichael Mueller 		ret = -EFAULT;
869658b6edaSMichael Mueller 	kfree(mach);
870658b6edaSMichael Mueller out:
871658b6edaSMichael Mueller 	return ret;
872658b6edaSMichael Mueller }
873658b6edaSMichael Mueller 
87415c9705fSDavid Hildenbrand static int kvm_s390_get_processor_feat(struct kvm *kvm,
87515c9705fSDavid Hildenbrand 				       struct kvm_device_attr *attr)
87615c9705fSDavid Hildenbrand {
87715c9705fSDavid Hildenbrand 	struct kvm_s390_vm_cpu_feat data;
87815c9705fSDavid Hildenbrand 
87915c9705fSDavid Hildenbrand 	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
88015c9705fSDavid Hildenbrand 		    KVM_S390_VM_CPU_FEAT_NR_BITS);
88115c9705fSDavid Hildenbrand 	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
88215c9705fSDavid Hildenbrand 		return -EFAULT;
88315c9705fSDavid Hildenbrand 	return 0;
88415c9705fSDavid Hildenbrand }
88515c9705fSDavid Hildenbrand 
88615c9705fSDavid Hildenbrand static int kvm_s390_get_machine_feat(struct kvm *kvm,
88715c9705fSDavid Hildenbrand 				     struct kvm_device_attr *attr)
88815c9705fSDavid Hildenbrand {
88915c9705fSDavid Hildenbrand 	struct kvm_s390_vm_cpu_feat data;
89015c9705fSDavid Hildenbrand 
89115c9705fSDavid Hildenbrand 	bitmap_copy((unsigned long *) data.feat,
89215c9705fSDavid Hildenbrand 		    kvm_s390_available_cpu_feat,
89315c9705fSDavid Hildenbrand 		    KVM_S390_VM_CPU_FEAT_NR_BITS);
89415c9705fSDavid Hildenbrand 	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
89515c9705fSDavid Hildenbrand 		return -EFAULT;
89615c9705fSDavid Hildenbrand 	return 0;
89715c9705fSDavid Hildenbrand }
89815c9705fSDavid Hildenbrand 
8990a763c78SDavid Hildenbrand static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
9000a763c78SDavid Hildenbrand 					  struct kvm_device_attr *attr)
9010a763c78SDavid Hildenbrand {
9020a763c78SDavid Hildenbrand 	/*
9030a763c78SDavid Hildenbrand 	 * Once we can actually configure subfunctions (kernel + hw support),
9040a763c78SDavid Hildenbrand 	 * we have to check if they were already set by user space, if so copy
9050a763c78SDavid Hildenbrand 	 * them from kvm->arch.
9060a763c78SDavid Hildenbrand 	 */
9070a763c78SDavid Hildenbrand 	return -ENXIO;
9080a763c78SDavid Hildenbrand }
9090a763c78SDavid Hildenbrand 
9100a763c78SDavid Hildenbrand static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
9110a763c78SDavid Hildenbrand 					struct kvm_device_attr *attr)
9120a763c78SDavid Hildenbrand {
9130a763c78SDavid Hildenbrand 	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
9140a763c78SDavid Hildenbrand 	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
9150a763c78SDavid Hildenbrand 		return -EFAULT;
9160a763c78SDavid Hildenbrand 	return 0;
9170a763c78SDavid Hildenbrand }
918658b6edaSMichael Mueller static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
919658b6edaSMichael Mueller {
920658b6edaSMichael Mueller 	int ret = -ENXIO;
921658b6edaSMichael Mueller 
922658b6edaSMichael Mueller 	switch (attr->attr) {
923658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
924658b6edaSMichael Mueller 		ret = kvm_s390_get_processor(kvm, attr);
925658b6edaSMichael Mueller 		break;
926658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MACHINE:
927658b6edaSMichael Mueller 		ret = kvm_s390_get_machine(kvm, attr);
928658b6edaSMichael Mueller 		break;
92915c9705fSDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
93015c9705fSDavid Hildenbrand 		ret = kvm_s390_get_processor_feat(kvm, attr);
93115c9705fSDavid Hildenbrand 		break;
93215c9705fSDavid Hildenbrand 	case KVM_S390_VM_CPU_MACHINE_FEAT:
93315c9705fSDavid Hildenbrand 		ret = kvm_s390_get_machine_feat(kvm, attr);
93415c9705fSDavid Hildenbrand 		break;
9350a763c78SDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
9360a763c78SDavid Hildenbrand 		ret = kvm_s390_get_processor_subfunc(kvm, attr);
9370a763c78SDavid Hildenbrand 		break;
9380a763c78SDavid Hildenbrand 	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
9390a763c78SDavid Hildenbrand 		ret = kvm_s390_get_machine_subfunc(kvm, attr);
9400a763c78SDavid Hildenbrand 		break;
941658b6edaSMichael Mueller 	}
942658b6edaSMichael Mueller 	return ret;
943658b6edaSMichael Mueller }
944658b6edaSMichael Mueller 
945f2061656SDominik Dingel static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
946f2061656SDominik Dingel {
947f2061656SDominik Dingel 	int ret;
948f2061656SDominik Dingel 
949f2061656SDominik Dingel 	switch (attr->group) {
9504f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
9518c0a7ce6SDominik Dingel 		ret = kvm_s390_set_mem_control(kvm, attr);
9524f718eabSDominik Dingel 		break;
95372f25020SJason J. Herne 	case KVM_S390_VM_TOD:
95472f25020SJason J. Herne 		ret = kvm_s390_set_tod(kvm, attr);
95572f25020SJason J. Herne 		break;
956658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
957658b6edaSMichael Mueller 		ret = kvm_s390_set_cpu_model(kvm, attr);
958658b6edaSMichael Mueller 		break;
959a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO:
960a374e892STony Krowiak 		ret = kvm_s390_vm_set_crypto(kvm, attr);
961a374e892STony Krowiak 		break;
962f2061656SDominik Dingel 	default:
963f2061656SDominik Dingel 		ret = -ENXIO;
964f2061656SDominik Dingel 		break;
965f2061656SDominik Dingel 	}
966f2061656SDominik Dingel 
967f2061656SDominik Dingel 	return ret;
968f2061656SDominik Dingel }
969f2061656SDominik Dingel 
970f2061656SDominik Dingel static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
971f2061656SDominik Dingel {
9728c0a7ce6SDominik Dingel 	int ret;
9738c0a7ce6SDominik Dingel 
9748c0a7ce6SDominik Dingel 	switch (attr->group) {
9758c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
9768c0a7ce6SDominik Dingel 		ret = kvm_s390_get_mem_control(kvm, attr);
9778c0a7ce6SDominik Dingel 		break;
97872f25020SJason J. Herne 	case KVM_S390_VM_TOD:
97972f25020SJason J. Herne 		ret = kvm_s390_get_tod(kvm, attr);
98072f25020SJason J. Herne 		break;
981658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
982658b6edaSMichael Mueller 		ret = kvm_s390_get_cpu_model(kvm, attr);
983658b6edaSMichael Mueller 		break;
9848c0a7ce6SDominik Dingel 	default:
9858c0a7ce6SDominik Dingel 		ret = -ENXIO;
9868c0a7ce6SDominik Dingel 		break;
9878c0a7ce6SDominik Dingel 	}
9888c0a7ce6SDominik Dingel 
9898c0a7ce6SDominik Dingel 	return ret;
990f2061656SDominik Dingel }
991f2061656SDominik Dingel 
992f2061656SDominik Dingel static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
993f2061656SDominik Dingel {
994f2061656SDominik Dingel 	int ret;
995f2061656SDominik Dingel 
996f2061656SDominik Dingel 	switch (attr->group) {
9974f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
9984f718eabSDominik Dingel 		switch (attr->attr) {
9994f718eabSDominik Dingel 		case KVM_S390_VM_MEM_ENABLE_CMMA:
10004f718eabSDominik Dingel 		case KVM_S390_VM_MEM_CLR_CMMA:
1001f9cbd9b0SDavid Hildenbrand 			ret = sclp.has_cmma ? 0 : -ENXIO;
1002f9cbd9b0SDavid Hildenbrand 			break;
10038c0a7ce6SDominik Dingel 		case KVM_S390_VM_MEM_LIMIT_SIZE:
10044f718eabSDominik Dingel 			ret = 0;
10054f718eabSDominik Dingel 			break;
10064f718eabSDominik Dingel 		default:
10074f718eabSDominik Dingel 			ret = -ENXIO;
10084f718eabSDominik Dingel 			break;
10094f718eabSDominik Dingel 		}
10104f718eabSDominik Dingel 		break;
101172f25020SJason J. Herne 	case KVM_S390_VM_TOD:
101272f25020SJason J. Herne 		switch (attr->attr) {
101372f25020SJason J. Herne 		case KVM_S390_VM_TOD_LOW:
101472f25020SJason J. Herne 		case KVM_S390_VM_TOD_HIGH:
101572f25020SJason J. Herne 			ret = 0;
101672f25020SJason J. Herne 			break;
101772f25020SJason J. Herne 		default:
101872f25020SJason J. Herne 			ret = -ENXIO;
101972f25020SJason J. Herne 			break;
102072f25020SJason J. Herne 		}
102172f25020SJason J. Herne 		break;
1022658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
1023658b6edaSMichael Mueller 		switch (attr->attr) {
1024658b6edaSMichael Mueller 		case KVM_S390_VM_CPU_PROCESSOR:
1025658b6edaSMichael Mueller 		case KVM_S390_VM_CPU_MACHINE:
102615c9705fSDavid Hildenbrand 		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
102715c9705fSDavid Hildenbrand 		case KVM_S390_VM_CPU_MACHINE_FEAT:
10280a763c78SDavid Hildenbrand 		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1029658b6edaSMichael Mueller 			ret = 0;
1030658b6edaSMichael Mueller 			break;
10310a763c78SDavid Hildenbrand 		/* configuring subfunctions is not supported yet */
10320a763c78SDavid Hildenbrand 		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1033658b6edaSMichael Mueller 		default:
1034658b6edaSMichael Mueller 			ret = -ENXIO;
1035658b6edaSMichael Mueller 			break;
1036658b6edaSMichael Mueller 		}
1037658b6edaSMichael Mueller 		break;
1038a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO:
1039a374e892STony Krowiak 		switch (attr->attr) {
1040a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1041a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1042a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1043a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1044a374e892STony Krowiak 			ret = 0;
1045a374e892STony Krowiak 			break;
1046a374e892STony Krowiak 		default:
1047a374e892STony Krowiak 			ret = -ENXIO;
1048a374e892STony Krowiak 			break;
1049a374e892STony Krowiak 		}
1050a374e892STony Krowiak 		break;
1051f2061656SDominik Dingel 	default:
1052f2061656SDominik Dingel 		ret = -ENXIO;
1053f2061656SDominik Dingel 		break;
1054f2061656SDominik Dingel 	}
1055f2061656SDominik Dingel 
1056f2061656SDominik Dingel 	return ret;
1057f2061656SDominik Dingel }
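/*
 * Note on the dispatcher above: kvm_s390_vm_has_attr() only reports
 * whether an attribute is implemented (0) or not (-ENXIO); it never
 * reads or modifies VM state.  Availability can still depend on the
 * machine, e.g. the CMMA attributes are only advertised when
 * sclp.has_cmma is set.
 */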
1058f2061656SDominik Dingel 
105930ee2a98SJason J. Herne static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
106030ee2a98SJason J. Herne {
106130ee2a98SJason J. Herne 	uint8_t *keys;
106230ee2a98SJason J. Herne 	uint64_t hva;
106330ee2a98SJason J. Herne 	int i, r = 0;
106430ee2a98SJason J. Herne 
106530ee2a98SJason J. Herne 	if (args->flags != 0)
106630ee2a98SJason J. Herne 		return -EINVAL;
106730ee2a98SJason J. Herne 
106830ee2a98SJason J. Herne 	/* Is this guest using storage keys? */
106930ee2a98SJason J. Herne 	if (!mm_use_skey(current->mm))
107030ee2a98SJason J. Herne 		return KVM_S390_GET_SKEYS_NONE;
107130ee2a98SJason J. Herne 
107230ee2a98SJason J. Herne 	/* Enforce sane limit on memory allocation */
107330ee2a98SJason J. Herne 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
107430ee2a98SJason J. Herne 		return -EINVAL;
107530ee2a98SJason J. Herne 
107630ee2a98SJason J. Herne 	keys = kmalloc_array(args->count, sizeof(uint8_t),
107730ee2a98SJason J. Herne 			     GFP_KERNEL | __GFP_NOWARN);
107830ee2a98SJason J. Herne 	if (!keys)
107930ee2a98SJason J. Herne 		keys = vmalloc(sizeof(uint8_t) * args->count);
108030ee2a98SJason J. Herne 	if (!keys)
108130ee2a98SJason J. Herne 		return -ENOMEM;
108230ee2a98SJason J. Herne 
1083d3ed1ceeSMartin Schwidefsky 	down_read(&current->mm->mmap_sem);
108430ee2a98SJason J. Herne 	for (i = 0; i < args->count; i++) {
108530ee2a98SJason J. Herne 		hva = gfn_to_hva(kvm, args->start_gfn + i);
108630ee2a98SJason J. Herne 		if (kvm_is_error_hva(hva)) {
108730ee2a98SJason J. Herne 			r = -EFAULT;
1088d3ed1ceeSMartin Schwidefsky 			break;
108930ee2a98SJason J. Herne 		}
109030ee2a98SJason J. Herne 
1091154c8c19SDavid Hildenbrand 		r = get_guest_storage_key(current->mm, hva, &keys[i]);
1092154c8c19SDavid Hildenbrand 		if (r)
1093d3ed1ceeSMartin Schwidefsky 			break;
109430ee2a98SJason J. Herne 	}
1095d3ed1ceeSMartin Schwidefsky 	up_read(&current->mm->mmap_sem);
109630ee2a98SJason J. Herne 
1097d3ed1ceeSMartin Schwidefsky 	if (!r) {
109830ee2a98SJason J. Herne 		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
109930ee2a98SJason J. Herne 				 sizeof(uint8_t) * args->count);
110030ee2a98SJason J. Herne 		if (r)
110130ee2a98SJason J. Herne 			r = -EFAULT;
1102d3ed1ceeSMartin Schwidefsky 	}
1103d3ed1ceeSMartin Schwidefsky 
110430ee2a98SJason J. Herne 	kvfree(keys);
110530ee2a98SJason J. Herne 	return r;
110630ee2a98SJason J. Herne }
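/*
 * kvm_s390_get_skeys() above reads one storage key per guest page,
 * starting at args->start_gfn, into a kernel buffer (kmalloc with a
 * vmalloc fallback) and copies the result to args->skeydata_addr.  It
 * returns KVM_S390_GET_SKEYS_NONE when the guest never enabled storage
 * keys.  A rough userspace sketch (vm_fd, key_buf and npages are
 * placeholders, error handling omitted):
 *
 *	struct kvm_s390_skeys skeys = {
 *		.start_gfn	= 0,
 *		.count		= npages,
 *		.skeydata_addr	= (__u64)(unsigned long)key_buf,
 *	};
 *	ret = ioctl(vm_fd, KVM_S390_GET_SKEYS, &skeys);
 */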
110730ee2a98SJason J. Herne 
110830ee2a98SJason J. Herne static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
110930ee2a98SJason J. Herne {
111030ee2a98SJason J. Herne 	uint8_t *keys;
111130ee2a98SJason J. Herne 	uint64_t hva;
111230ee2a98SJason J. Herne 	int i, r = 0;
111330ee2a98SJason J. Herne 
111430ee2a98SJason J. Herne 	if (args->flags != 0)
111530ee2a98SJason J. Herne 		return -EINVAL;
111630ee2a98SJason J. Herne 
111730ee2a98SJason J. Herne 	/* Enforce sane limit on memory allocation */
111830ee2a98SJason J. Herne 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
111930ee2a98SJason J. Herne 		return -EINVAL;
112030ee2a98SJason J. Herne 
112130ee2a98SJason J. Herne 	keys = kmalloc_array(args->count, sizeof(uint8_t),
112230ee2a98SJason J. Herne 			     GFP_KERNEL | __GFP_NOWARN);
112330ee2a98SJason J. Herne 	if (!keys)
112430ee2a98SJason J. Herne 		keys = vmalloc(sizeof(uint8_t) * args->count);
112530ee2a98SJason J. Herne 	if (!keys)
112630ee2a98SJason J. Herne 		return -ENOMEM;
112730ee2a98SJason J. Herne 
112830ee2a98SJason J. Herne 	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
112930ee2a98SJason J. Herne 			   sizeof(uint8_t) * args->count);
113030ee2a98SJason J. Herne 	if (r) {
113130ee2a98SJason J. Herne 		r = -EFAULT;
113230ee2a98SJason J. Herne 		goto out;
113330ee2a98SJason J. Herne 	}
113430ee2a98SJason J. Herne 
113530ee2a98SJason J. Herne 	/* Enable storage key handling for the guest */
113614d4a425SDominik Dingel 	r = s390_enable_skey();
113714d4a425SDominik Dingel 	if (r)
113814d4a425SDominik Dingel 		goto out;
113930ee2a98SJason J. Herne 
1140d3ed1ceeSMartin Schwidefsky 	down_read(&current->mm->mmap_sem);
114130ee2a98SJason J. Herne 	for (i = 0; i < args->count; i++) {
114230ee2a98SJason J. Herne 		hva = gfn_to_hva(kvm, args->start_gfn + i);
114330ee2a98SJason J. Herne 		if (kvm_is_error_hva(hva)) {
114430ee2a98SJason J. Herne 			r = -EFAULT;
1145d3ed1ceeSMartin Schwidefsky 			break;
114630ee2a98SJason J. Herne 		}
114730ee2a98SJason J. Herne 
114830ee2a98SJason J. Herne 		/* Lowest order bit is reserved */
114930ee2a98SJason J. Herne 		if (keys[i] & 0x01) {
115030ee2a98SJason J. Herne 			r = -EINVAL;
1151d3ed1ceeSMartin Schwidefsky 			break;
115230ee2a98SJason J. Herne 		}
115330ee2a98SJason J. Herne 
1154fe69eabfSDavid Hildenbrand 		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
115530ee2a98SJason J. Herne 		if (r)
1156d3ed1ceeSMartin Schwidefsky 			break;
115730ee2a98SJason J. Herne 	}
1158d3ed1ceeSMartin Schwidefsky 	up_read(&current->mm->mmap_sem);
115930ee2a98SJason J. Herne out:
116030ee2a98SJason J. Herne 	kvfree(keys);
116130ee2a98SJason J. Herne 	return r;
116230ee2a98SJason J. Herne }
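/*
 * kvm_s390_set_skeys() is the inverse path: copy the key array from
 * userspace, switch the whole address space to storage key handling via
 * s390_enable_skey() (a one-time conversion), then write one key per
 * page.  The lowest bit of each key byte is architecturally reserved,
 * hence the -EINVAL check above.
 */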
116330ee2a98SJason J. Herne 
1164b0c632dbSHeiko Carstens long kvm_arch_vm_ioctl(struct file *filp,
1165b0c632dbSHeiko Carstens 		       unsigned int ioctl, unsigned long arg)
1166b0c632dbSHeiko Carstens {
1167b0c632dbSHeiko Carstens 	struct kvm *kvm = filp->private_data;
1168b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
1169f2061656SDominik Dingel 	struct kvm_device_attr attr;
1170b0c632dbSHeiko Carstens 	int r;
1171b0c632dbSHeiko Carstens 
1172b0c632dbSHeiko Carstens 	switch (ioctl) {
1173ba5c1e9bSCarsten Otte 	case KVM_S390_INTERRUPT: {
1174ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
1175ba5c1e9bSCarsten Otte 
1176ba5c1e9bSCarsten Otte 		r = -EFAULT;
1177ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
1178ba5c1e9bSCarsten Otte 			break;
1179ba5c1e9bSCarsten Otte 		r = kvm_s390_inject_vm(kvm, &s390int);
1180ba5c1e9bSCarsten Otte 		break;
1181ba5c1e9bSCarsten Otte 	}
1182d938dc55SCornelia Huck 	case KVM_ENABLE_CAP: {
1183d938dc55SCornelia Huck 		struct kvm_enable_cap cap;
1184d938dc55SCornelia Huck 		r = -EFAULT;
1185d938dc55SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
1186d938dc55SCornelia Huck 			break;
1187d938dc55SCornelia Huck 		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
1188d938dc55SCornelia Huck 		break;
1189d938dc55SCornelia Huck 	}
119084223598SCornelia Huck 	case KVM_CREATE_IRQCHIP: {
119184223598SCornelia Huck 		struct kvm_irq_routing_entry routing;
119284223598SCornelia Huck 
119384223598SCornelia Huck 		r = -EINVAL;
119484223598SCornelia Huck 		if (kvm->arch.use_irqchip) {
119584223598SCornelia Huck 			/* Set up dummy routing. */
119684223598SCornelia Huck 			memset(&routing, 0, sizeof(routing));
1197152b2839SNicholas Krause 			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
119884223598SCornelia Huck 		}
119984223598SCornelia Huck 		break;
120084223598SCornelia Huck 	}
1201f2061656SDominik Dingel 	case KVM_SET_DEVICE_ATTR: {
1202f2061656SDominik Dingel 		r = -EFAULT;
1203f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1204f2061656SDominik Dingel 			break;
1205f2061656SDominik Dingel 		r = kvm_s390_vm_set_attr(kvm, &attr);
1206f2061656SDominik Dingel 		break;
1207f2061656SDominik Dingel 	}
1208f2061656SDominik Dingel 	case KVM_GET_DEVICE_ATTR: {
1209f2061656SDominik Dingel 		r = -EFAULT;
1210f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1211f2061656SDominik Dingel 			break;
1212f2061656SDominik Dingel 		r = kvm_s390_vm_get_attr(kvm, &attr);
1213f2061656SDominik Dingel 		break;
1214f2061656SDominik Dingel 	}
1215f2061656SDominik Dingel 	case KVM_HAS_DEVICE_ATTR: {
1216f2061656SDominik Dingel 		r = -EFAULT;
1217f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1218f2061656SDominik Dingel 			break;
1219f2061656SDominik Dingel 		r = kvm_s390_vm_has_attr(kvm, &attr);
1220f2061656SDominik Dingel 		break;
1221f2061656SDominik Dingel 	}
122230ee2a98SJason J. Herne 	case KVM_S390_GET_SKEYS: {
122330ee2a98SJason J. Herne 		struct kvm_s390_skeys args;
122430ee2a98SJason J. Herne 
122530ee2a98SJason J. Herne 		r = -EFAULT;
122630ee2a98SJason J. Herne 		if (copy_from_user(&args, argp,
122730ee2a98SJason J. Herne 				   sizeof(struct kvm_s390_skeys)))
122830ee2a98SJason J. Herne 			break;
122930ee2a98SJason J. Herne 		r = kvm_s390_get_skeys(kvm, &args);
123030ee2a98SJason J. Herne 		break;
123130ee2a98SJason J. Herne 	}
123230ee2a98SJason J. Herne 	case KVM_S390_SET_SKEYS: {
123330ee2a98SJason J. Herne 		struct kvm_s390_skeys args;
123430ee2a98SJason J. Herne 
123530ee2a98SJason J. Herne 		r = -EFAULT;
123630ee2a98SJason J. Herne 		if (copy_from_user(&args, argp,
123730ee2a98SJason J. Herne 				   sizeof(struct kvm_s390_skeys)))
123830ee2a98SJason J. Herne 			break;
123930ee2a98SJason J. Herne 		r = kvm_s390_set_skeys(kvm, &args);
124030ee2a98SJason J. Herne 		break;
124130ee2a98SJason J. Herne 	}
1242b0c632dbSHeiko Carstens 	default:
1243367e1319SAvi Kivity 		r = -ENOTTY;
1244b0c632dbSHeiko Carstens 	}
1245b0c632dbSHeiko Carstens 
1246b0c632dbSHeiko Carstens 	return r;
1247b0c632dbSHeiko Carstens }
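/*
 * The KVM_{SET,GET,HAS}_DEVICE_ATTR cases above all take a
 * struct kvm_device_attr from userspace and dispatch on attr.group.  A
 * minimal probe-then-set sequence from userspace might look roughly
 * like this (vm_fd is a placeholder, error handling omitted):
 *
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_S390_VM_MEM_CTRL,
 *		.attr	= KVM_S390_VM_MEM_ENABLE_CMMA,
 *	};
 *	if (ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0)
 *		ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * Attributes that carry data (e.g. KVM_S390_VM_MEM_LIMIT_SIZE) pass a
 * userspace pointer in attr.addr.
 */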
1248b0c632dbSHeiko Carstens 
124945c9b47cSTony Krowiak static int kvm_s390_query_ap_config(u8 *config)
125045c9b47cSTony Krowiak {
125145c9b47cSTony Krowiak 	u32 fcn_code = 0x04000000UL;
125286044c8cSChristian Borntraeger 	u32 cc = 0;
125345c9b47cSTony Krowiak 
125486044c8cSChristian Borntraeger 	memset(config, 0, 128);
125545c9b47cSTony Krowiak 	asm volatile(
125645c9b47cSTony Krowiak 		"lgr 0,%1\n"
125745c9b47cSTony Krowiak 		"lgr 2,%2\n"
125845c9b47cSTony Krowiak 		".long 0xb2af0000\n"		/* PQAP(QCI) */
125986044c8cSChristian Borntraeger 		"0: ipm %0\n"
126045c9b47cSTony Krowiak 		"srl %0,28\n"
126186044c8cSChristian Borntraeger 		"1:\n"
126286044c8cSChristian Borntraeger 		EX_TABLE(0b, 1b)
126386044c8cSChristian Borntraeger 		: "+r" (cc)
126445c9b47cSTony Krowiak 		: "r" (fcn_code), "r" (config)
126545c9b47cSTony Krowiak 		: "cc", "0", "2", "memory"
126645c9b47cSTony Krowiak 	);
126745c9b47cSTony Krowiak 
126845c9b47cSTony Krowiak 	return cc;
126945c9b47cSTony Krowiak }
127045c9b47cSTony Krowiak 
127145c9b47cSTony Krowiak static int kvm_s390_apxa_installed(void)
127245c9b47cSTony Krowiak {
127345c9b47cSTony Krowiak 	u8 config[128];
127445c9b47cSTony Krowiak 	int cc;
127545c9b47cSTony Krowiak 
1276a6aacc3fSHeiko Carstens 	if (test_facility(12)) {
127745c9b47cSTony Krowiak 		cc = kvm_s390_query_ap_config(config);
127845c9b47cSTony Krowiak 
127945c9b47cSTony Krowiak 		if (cc)
128045c9b47cSTony Krowiak 			pr_err("PQAP(QCI) failed with cc=%d", cc);
128145c9b47cSTony Krowiak 		else
128245c9b47cSTony Krowiak 			return config[0] & 0x40;
128345c9b47cSTony Krowiak 	}
128445c9b47cSTony Krowiak 
128545c9b47cSTony Krowiak 	return 0;
128645c9b47cSTony Krowiak }
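/*
 * kvm_s390_query_ap_config() issues PQAP with the QCI function code
 * (0x04000000) through the hand-coded opcode 0xb2af; the EX_TABLE entry
 * turns a potential exception into a non-zero condition code.  The
 * query is only attempted when facility bit 12 is installed, and bit
 * 0x40 in the first byte of the returned info block is interpreted as
 * "APXA available", which selects the extended crycb format below.
 */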
128745c9b47cSTony Krowiak 
128845c9b47cSTony Krowiak static void kvm_s390_set_crycb_format(struct kvm *kvm)
128945c9b47cSTony Krowiak {
129045c9b47cSTony Krowiak 	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
129145c9b47cSTony Krowiak 
129245c9b47cSTony Krowiak 	if (kvm_s390_apxa_installed())
129345c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
129445c9b47cSTony Krowiak 	else
129545c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
129645c9b47cSTony Krowiak }
129745c9b47cSTony Krowiak 
12989bb0ec09SDavid Hildenbrand static u64 kvm_s390_get_initial_cpuid(void)
12999d8d5786SMichael Mueller {
13009bb0ec09SDavid Hildenbrand 	struct cpuid cpuid;
13019bb0ec09SDavid Hildenbrand 
13029bb0ec09SDavid Hildenbrand 	get_cpu_id(&cpuid);
13039bb0ec09SDavid Hildenbrand 	cpuid.version = 0xff;
13049bb0ec09SDavid Hildenbrand 	return *((u64 *) &cpuid);
13059d8d5786SMichael Mueller }
13069d8d5786SMichael Mueller 
1307c54f0d6aSDavid Hildenbrand static void kvm_s390_crypto_init(struct kvm *kvm)
13085102ee87STony Krowiak {
13099d8d5786SMichael Mueller 	if (!test_kvm_facility(kvm, 76))
1310c54f0d6aSDavid Hildenbrand 		return;
13115102ee87STony Krowiak 
1312c54f0d6aSDavid Hildenbrand 	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
131345c9b47cSTony Krowiak 	kvm_s390_set_crycb_format(kvm);
13145102ee87STony Krowiak 
1315ed6f76b4STony Krowiak 	/* Enable AES/DEA protected key functions by default */
1316ed6f76b4STony Krowiak 	kvm->arch.crypto.aes_kw = 1;
1317ed6f76b4STony Krowiak 	kvm->arch.crypto.dea_kw = 1;
1318ed6f76b4STony Krowiak 	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1319ed6f76b4STony Krowiak 			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1320ed6f76b4STony Krowiak 	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1321ed6f76b4STony Krowiak 			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
13225102ee87STony Krowiak }
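/*
 * Per-VM crypto setup: the crycb lives in the shared sie_page2, its
 * format depends on APXA, and the AES/DEA protected-key wrapping masks
 * are filled with fresh random data, presumably so that wrapped keys
 * differ between guests.
 */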
13235102ee87STony Krowiak 
13247d43bafcSEugene (jno) Dvurechenski static void sca_dispose(struct kvm *kvm)
13257d43bafcSEugene (jno) Dvurechenski {
13267d43bafcSEugene (jno) Dvurechenski 	if (kvm->arch.use_esca)
13275e044315SEugene (jno) Dvurechenski 		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
13287d43bafcSEugene (jno) Dvurechenski 	else
13297d43bafcSEugene (jno) Dvurechenski 		free_page((unsigned long)(kvm->arch.sca));
13307d43bafcSEugene (jno) Dvurechenski 	kvm->arch.sca = NULL;
13317d43bafcSEugene (jno) Dvurechenski }
13327d43bafcSEugene (jno) Dvurechenski 
1333e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
1334b0c632dbSHeiko Carstens {
133576a6dd72SDavid Hildenbrand 	gfp_t alloc_flags = GFP_KERNEL;
13369d8d5786SMichael Mueller 	int i, rc;
1337b0c632dbSHeiko Carstens 	char debug_name[16];
1338f6c137ffSChristian Borntraeger 	static unsigned long sca_offset;
1339b0c632dbSHeiko Carstens 
1340e08b9637SCarsten Otte 	rc = -EINVAL;
1341e08b9637SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
1342e08b9637SCarsten Otte 	if (type & ~KVM_VM_S390_UCONTROL)
1343e08b9637SCarsten Otte 		goto out_err;
1344e08b9637SCarsten Otte 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
1345e08b9637SCarsten Otte 		goto out_err;
1346e08b9637SCarsten Otte #else
1347e08b9637SCarsten Otte 	if (type)
1348e08b9637SCarsten Otte 		goto out_err;
1349e08b9637SCarsten Otte #endif
1350e08b9637SCarsten Otte 
1351b0c632dbSHeiko Carstens 	rc = s390_enable_sie();
1352b0c632dbSHeiko Carstens 	if (rc)
1353d89f5effSJan Kiszka 		goto out_err;
1354b0c632dbSHeiko Carstens 
1355b290411aSCarsten Otte 	rc = -ENOMEM;
1356b290411aSCarsten Otte 
13577d0a5e62SJanosch Frank 	ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);
13587d0a5e62SJanosch Frank 
13597d43bafcSEugene (jno) Dvurechenski 	kvm->arch.use_esca = 0; /* start with basic SCA */
136076a6dd72SDavid Hildenbrand 	if (!sclp.has_64bscao)
136176a6dd72SDavid Hildenbrand 		alloc_flags |= GFP_DMA;
13625e044315SEugene (jno) Dvurechenski 	rwlock_init(&kvm->arch.sca_lock);
136376a6dd72SDavid Hildenbrand 	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
1364b0c632dbSHeiko Carstens 	if (!kvm->arch.sca)
1365d89f5effSJan Kiszka 		goto out_err;
1366f6c137ffSChristian Borntraeger 	spin_lock(&kvm_lock);
1367c5c2c393SDavid Hildenbrand 	sca_offset += 16;
1368bc784cceSEugene (jno) Dvurechenski 	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
1369c5c2c393SDavid Hildenbrand 		sca_offset = 0;
1370bc784cceSEugene (jno) Dvurechenski 	kvm->arch.sca = (struct bsca_block *)
1371bc784cceSEugene (jno) Dvurechenski 			((char *) kvm->arch.sca + sca_offset);
1372f6c137ffSChristian Borntraeger 	spin_unlock(&kvm_lock);
1373b0c632dbSHeiko Carstens 
1374b0c632dbSHeiko Carstens 	sprintf(debug_name, "kvm-%u", current->pid);
1375b0c632dbSHeiko Carstens 
13761cb9cf72SChristian Borntraeger 	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
1377b0c632dbSHeiko Carstens 	if (!kvm->arch.dbf)
137840f5b735SDominik Dingel 		goto out_err;
1379b0c632dbSHeiko Carstens 
1380c54f0d6aSDavid Hildenbrand 	kvm->arch.sie_page2 =
1381c54f0d6aSDavid Hildenbrand 	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1382c54f0d6aSDavid Hildenbrand 	if (!kvm->arch.sie_page2)
138340f5b735SDominik Dingel 		goto out_err;
13849d8d5786SMichael Mueller 
1385fb5bf93fSMichael Mueller 	/* Populate the facility mask initially. */
1386c54f0d6aSDavid Hildenbrand 	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
138794422ee8SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
13889d8d5786SMichael Mueller 	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
13899d8d5786SMichael Mueller 		if (i < kvm_s390_fac_list_mask_size())
1390c54f0d6aSDavid Hildenbrand 			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
13919d8d5786SMichael Mueller 		else
1392c54f0d6aSDavid Hildenbrand 			kvm->arch.model.fac_mask[i] = 0UL;
13939d8d5786SMichael Mueller 	}
13949d8d5786SMichael Mueller 
1395981467c9SMichael Mueller 	/* Populate the facility list initially. */
1396c54f0d6aSDavid Hildenbrand 	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
1397c54f0d6aSDavid Hildenbrand 	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
1398981467c9SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
1399981467c9SMichael Mueller 
140095ca2cb5SJanosch Frank 	set_kvm_facility(kvm->arch.model.fac_mask, 74);
140195ca2cb5SJanosch Frank 	set_kvm_facility(kvm->arch.model.fac_list, 74);
140295ca2cb5SJanosch Frank 
14039bb0ec09SDavid Hildenbrand 	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
140437c5f6c8SDavid Hildenbrand 	kvm->arch.model.ibc = sclp.ibc & 0x0fff;
14059d8d5786SMichael Mueller 
1406c54f0d6aSDavid Hildenbrand 	kvm_s390_crypto_init(kvm);
14075102ee87STony Krowiak 
1408ba5c1e9bSCarsten Otte 	spin_lock_init(&kvm->arch.float_int.lock);
14096d3da241SJens Freimann 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
14106d3da241SJens Freimann 		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
14118a242234SHeiko Carstens 	init_waitqueue_head(&kvm->arch.ipte_wq);
1412a6b7e459SThomas Huth 	mutex_init(&kvm->arch.ipte_mutex);
1413ba5c1e9bSCarsten Otte 
1414b0c632dbSHeiko Carstens 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
141578f26131SChristian Borntraeger 	VM_EVENT(kvm, 3, "vm created with type %lu", type);
1416b0c632dbSHeiko Carstens 
1417e08b9637SCarsten Otte 	if (type & KVM_VM_S390_UCONTROL) {
1418e08b9637SCarsten Otte 		kvm->arch.gmap = NULL;
1419a3a92c31SDominik Dingel 		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
1420e08b9637SCarsten Otte 	} else {
142132e6b236SGuenther Hutzl 		if (sclp.hamax == U64_MAX)
1422a3a92c31SDominik Dingel 			kvm->arch.mem_limit = TASK_MAX_SIZE;
142332e6b236SGuenther Hutzl 		else
142432e6b236SGuenther Hutzl 			kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
142532e6b236SGuenther Hutzl 						    sclp.hamax + 1);
14266ea427bbSMartin Schwidefsky 		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
1427598841caSCarsten Otte 		if (!kvm->arch.gmap)
142840f5b735SDominik Dingel 			goto out_err;
14292c70fe44SChristian Borntraeger 		kvm->arch.gmap->private = kvm;
143024eb3a82SDominik Dingel 		kvm->arch.gmap->pfault_enabled = 0;
1431e08b9637SCarsten Otte 	}
1432fa6b7fe9SCornelia Huck 
1433fa6b7fe9SCornelia Huck 	kvm->arch.css_support = 0;
143484223598SCornelia Huck 	kvm->arch.use_irqchip = 0;
143572f25020SJason J. Herne 	kvm->arch.epoch = 0;
1436fa6b7fe9SCornelia Huck 
14378ad35755SDavid Hildenbrand 	spin_lock_init(&kvm->arch.start_stop_lock);
1438a3508fbeSDavid Hildenbrand 	kvm_s390_vsie_init(kvm);
14398335713aSChristian Borntraeger 	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
14408ad35755SDavid Hildenbrand 
1441d89f5effSJan Kiszka 	return 0;
1442d89f5effSJan Kiszka out_err:
1443c54f0d6aSDavid Hildenbrand 	free_page((unsigned long)kvm->arch.sie_page2);
144440f5b735SDominik Dingel 	debug_unregister(kvm->arch.dbf);
14457d43bafcSEugene (jno) Dvurechenski 	sca_dispose(kvm);
144678f26131SChristian Borntraeger 	KVM_EVENT(3, "creation of vm failed: %d", rc);
1447d89f5effSJan Kiszka 	return rc;
1448b0c632dbSHeiko Carstens }
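/*
 * VM creation in short: allocate a basic SCA (below 2 GB when the
 * 64-bit SCA origin is not supported), stagger its offset within the
 * page across VMs via sca_offset (apparently to spread cache line
 * usage), register a debug feature, derive the facility mask/list and
 * cpuid/ibc for the CPU model, set up crypto and floating interrupt
 * state, and create the gmap (guest address space) unless this is a
 * ucontrol VM, which manages its gmaps itself.
 */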
1449b0c632dbSHeiko Carstens 
1450d329c035SChristian Borntraeger void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1451d329c035SChristian Borntraeger {
1452d329c035SChristian Borntraeger 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
1453ade38c31SCornelia Huck 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
145467335e63SChristian Borntraeger 	kvm_s390_clear_local_irqs(vcpu);
14553c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
1456bc784cceSEugene (jno) Dvurechenski 	if (!kvm_is_ucontrol(vcpu->kvm))
1457a6e2f683SEugene (jno) Dvurechenski 		sca_del_vcpu(vcpu);
145827e0393fSCarsten Otte 
145927e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm))
14606ea427bbSMartin Schwidefsky 		gmap_remove(vcpu->arch.gmap);
146127e0393fSCarsten Otte 
1462e6db1d61SDominik Dingel 	if (vcpu->kvm->arch.use_cmma)
1463b31605c1SDominik Dingel 		kvm_s390_vcpu_unsetup_cmma(vcpu);
1464d329c035SChristian Borntraeger 	free_page((unsigned long)(vcpu->arch.sie_block));
1465b31288faSKonstantin Weitz 
14666692cef3SChristian Borntraeger 	kvm_vcpu_uninit(vcpu);
1467b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
1468d329c035SChristian Borntraeger }
1469d329c035SChristian Borntraeger 
1470d329c035SChristian Borntraeger static void kvm_free_vcpus(struct kvm *kvm)
1471d329c035SChristian Borntraeger {
1472d329c035SChristian Borntraeger 	unsigned int i;
1473988a2caeSGleb Natapov 	struct kvm_vcpu *vcpu;
1474d329c035SChristian Borntraeger 
1475988a2caeSGleb Natapov 	kvm_for_each_vcpu(i, vcpu, kvm)
1476988a2caeSGleb Natapov 		kvm_arch_vcpu_destroy(vcpu);
1477988a2caeSGleb Natapov 
1478988a2caeSGleb Natapov 	mutex_lock(&kvm->lock);
1479988a2caeSGleb Natapov 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
1480d329c035SChristian Borntraeger 		kvm->vcpus[i] = NULL;
1481988a2caeSGleb Natapov 
1482988a2caeSGleb Natapov 	atomic_set(&kvm->online_vcpus, 0);
1483988a2caeSGleb Natapov 	mutex_unlock(&kvm->lock);
1484d329c035SChristian Borntraeger }
1485d329c035SChristian Borntraeger 
1486b0c632dbSHeiko Carstens void kvm_arch_destroy_vm(struct kvm *kvm)
1487b0c632dbSHeiko Carstens {
1488d329c035SChristian Borntraeger 	kvm_free_vcpus(kvm);
14897d43bafcSEugene (jno) Dvurechenski 	sca_dispose(kvm);
1490d329c035SChristian Borntraeger 	debug_unregister(kvm->arch.dbf);
1491c54f0d6aSDavid Hildenbrand 	free_page((unsigned long)kvm->arch.sie_page2);
149227e0393fSCarsten Otte 	if (!kvm_is_ucontrol(kvm))
14936ea427bbSMartin Schwidefsky 		gmap_remove(kvm->arch.gmap);
1494841b91c5SCornelia Huck 	kvm_s390_destroy_adapters(kvm);
149567335e63SChristian Borntraeger 	kvm_s390_clear_float_irqs(kvm);
1496a3508fbeSDavid Hildenbrand 	kvm_s390_vsie_destroy(kvm);
14978335713aSChristian Borntraeger 	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
1498b0c632dbSHeiko Carstens }
1499b0c632dbSHeiko Carstens 
1500b0c632dbSHeiko Carstens /* Section: vcpu related */
1501dafd032aSDominik Dingel static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1502b0c632dbSHeiko Carstens {
15036ea427bbSMartin Schwidefsky 	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
150427e0393fSCarsten Otte 	if (!vcpu->arch.gmap)
150527e0393fSCarsten Otte 		return -ENOMEM;
15062c70fe44SChristian Borntraeger 	vcpu->arch.gmap->private = vcpu->kvm;
1507dafd032aSDominik Dingel 
150827e0393fSCarsten Otte 	return 0;
150927e0393fSCarsten Otte }
151027e0393fSCarsten Otte 
1511a6e2f683SEugene (jno) Dvurechenski static void sca_del_vcpu(struct kvm_vcpu *vcpu)
1512a6e2f683SEugene (jno) Dvurechenski {
15135e044315SEugene (jno) Dvurechenski 	read_lock(&vcpu->kvm->arch.sca_lock);
15147d43bafcSEugene (jno) Dvurechenski 	if (vcpu->kvm->arch.use_esca) {
15157d43bafcSEugene (jno) Dvurechenski 		struct esca_block *sca = vcpu->kvm->arch.sca;
15167d43bafcSEugene (jno) Dvurechenski 
15177d43bafcSEugene (jno) Dvurechenski 		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
15187d43bafcSEugene (jno) Dvurechenski 		sca->cpu[vcpu->vcpu_id].sda = 0;
15197d43bafcSEugene (jno) Dvurechenski 	} else {
1520bc784cceSEugene (jno) Dvurechenski 		struct bsca_block *sca = vcpu->kvm->arch.sca;
1521a6e2f683SEugene (jno) Dvurechenski 
1522a6e2f683SEugene (jno) Dvurechenski 		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
1523a6e2f683SEugene (jno) Dvurechenski 		sca->cpu[vcpu->vcpu_id].sda = 0;
1524a6e2f683SEugene (jno) Dvurechenski 	}
15255e044315SEugene (jno) Dvurechenski 	read_unlock(&vcpu->kvm->arch.sca_lock);
15267d43bafcSEugene (jno) Dvurechenski }
1527a6e2f683SEugene (jno) Dvurechenski 
1528eaa78f34SDavid Hildenbrand static void sca_add_vcpu(struct kvm_vcpu *vcpu)
1529a6e2f683SEugene (jno) Dvurechenski {
1530eaa78f34SDavid Hildenbrand 	read_lock(&vcpu->kvm->arch.sca_lock);
1531eaa78f34SDavid Hildenbrand 	if (vcpu->kvm->arch.use_esca) {
1532eaa78f34SDavid Hildenbrand 		struct esca_block *sca = vcpu->kvm->arch.sca;
15337d43bafcSEugene (jno) Dvurechenski 
1534eaa78f34SDavid Hildenbrand 		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
15357d43bafcSEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
15367d43bafcSEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
153725508824SDavid Hildenbrand 		vcpu->arch.sie_block->ecb2 |= 0x04U;
1538eaa78f34SDavid Hildenbrand 		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
15397d43bafcSEugene (jno) Dvurechenski 	} else {
1540eaa78f34SDavid Hildenbrand 		struct bsca_block *sca = vcpu->kvm->arch.sca;
1541a6e2f683SEugene (jno) Dvurechenski 
1542eaa78f34SDavid Hildenbrand 		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
1543a6e2f683SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
1544a6e2f683SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
1545eaa78f34SDavid Hildenbrand 		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
1546a6e2f683SEugene (jno) Dvurechenski 	}
1547eaa78f34SDavid Hildenbrand 	read_unlock(&vcpu->kvm->arch.sca_lock);
15485e044315SEugene (jno) Dvurechenski }
15495e044315SEugene (jno) Dvurechenski 
15505e044315SEugene (jno) Dvurechenski /* Basic SCA to Extended SCA data copy routines */
15515e044315SEugene (jno) Dvurechenski static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
15525e044315SEugene (jno) Dvurechenski {
15535e044315SEugene (jno) Dvurechenski 	d->sda = s->sda;
15545e044315SEugene (jno) Dvurechenski 	d->sigp_ctrl.c = s->sigp_ctrl.c;
15555e044315SEugene (jno) Dvurechenski 	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
15565e044315SEugene (jno) Dvurechenski }
15575e044315SEugene (jno) Dvurechenski 
15585e044315SEugene (jno) Dvurechenski static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
15595e044315SEugene (jno) Dvurechenski {
15605e044315SEugene (jno) Dvurechenski 	int i;
15615e044315SEugene (jno) Dvurechenski 
15625e044315SEugene (jno) Dvurechenski 	d->ipte_control = s->ipte_control;
15635e044315SEugene (jno) Dvurechenski 	d->mcn[0] = s->mcn;
15645e044315SEugene (jno) Dvurechenski 	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
15655e044315SEugene (jno) Dvurechenski 		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
15665e044315SEugene (jno) Dvurechenski }
15675e044315SEugene (jno) Dvurechenski 
15685e044315SEugene (jno) Dvurechenski static int sca_switch_to_extended(struct kvm *kvm)
15695e044315SEugene (jno) Dvurechenski {
15705e044315SEugene (jno) Dvurechenski 	struct bsca_block *old_sca = kvm->arch.sca;
15715e044315SEugene (jno) Dvurechenski 	struct esca_block *new_sca;
15725e044315SEugene (jno) Dvurechenski 	struct kvm_vcpu *vcpu;
15735e044315SEugene (jno) Dvurechenski 	unsigned int vcpu_idx;
15745e044315SEugene (jno) Dvurechenski 	u32 scaol, scaoh;
15755e044315SEugene (jno) Dvurechenski 
15765e044315SEugene (jno) Dvurechenski 	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
15775e044315SEugene (jno) Dvurechenski 	if (!new_sca)
15785e044315SEugene (jno) Dvurechenski 		return -ENOMEM;
15795e044315SEugene (jno) Dvurechenski 
15805e044315SEugene (jno) Dvurechenski 	scaoh = (u32)((u64)(new_sca) >> 32);
15815e044315SEugene (jno) Dvurechenski 	scaol = (u32)(u64)(new_sca) & ~0x3fU;
15825e044315SEugene (jno) Dvurechenski 
15835e044315SEugene (jno) Dvurechenski 	kvm_s390_vcpu_block_all(kvm);
15845e044315SEugene (jno) Dvurechenski 	write_lock(&kvm->arch.sca_lock);
15855e044315SEugene (jno) Dvurechenski 
15865e044315SEugene (jno) Dvurechenski 	sca_copy_b_to_e(new_sca, old_sca);
15875e044315SEugene (jno) Dvurechenski 
15885e044315SEugene (jno) Dvurechenski 	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
15895e044315SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaoh = scaoh;
15905e044315SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaol = scaol;
15915e044315SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->ecb2 |= 0x04U;
15925e044315SEugene (jno) Dvurechenski 	}
15935e044315SEugene (jno) Dvurechenski 	kvm->arch.sca = new_sca;
15945e044315SEugene (jno) Dvurechenski 	kvm->arch.use_esca = 1;
15955e044315SEugene (jno) Dvurechenski 
15965e044315SEugene (jno) Dvurechenski 	write_unlock(&kvm->arch.sca_lock);
15975e044315SEugene (jno) Dvurechenski 	kvm_s390_vcpu_unblock_all(kvm);
15985e044315SEugene (jno) Dvurechenski 
15995e044315SEugene (jno) Dvurechenski 	free_page((unsigned long)old_sca);
16005e044315SEugene (jno) Dvurechenski 
16018335713aSChristian Borntraeger 	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
16028335713aSChristian Borntraeger 		 old_sca, kvm->arch.sca);
16035e044315SEugene (jno) Dvurechenski 	return 0;
16047d43bafcSEugene (jno) Dvurechenski }
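/*
 * The switch from the basic to the extended SCA happens lazily, the
 * first time a vcpu id beyond the basic slots is requested: all vcpus
 * are blocked and kicked out of SIE, the entries are copied under the
 * sca_lock write lock, every SIE block is rewired to the new origin
 * (scaoh/scaol hold the upper and lower halves of the address) and
 * ECB2 bit 0x04 appears to select the extended format.
 */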
1605a6e2f683SEugene (jno) Dvurechenski 
1606a6e2f683SEugene (jno) Dvurechenski static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
1607a6e2f683SEugene (jno) Dvurechenski {
16085e044315SEugene (jno) Dvurechenski 	int rc;
16095e044315SEugene (jno) Dvurechenski 
16105e044315SEugene (jno) Dvurechenski 	if (id < KVM_S390_BSCA_CPU_SLOTS)
16115e044315SEugene (jno) Dvurechenski 		return true;
161276a6dd72SDavid Hildenbrand 	if (!sclp.has_esca || !sclp.has_64bscao)
16135e044315SEugene (jno) Dvurechenski 		return false;
16145e044315SEugene (jno) Dvurechenski 
16155e044315SEugene (jno) Dvurechenski 	mutex_lock(&kvm->lock);
16165e044315SEugene (jno) Dvurechenski 	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
16175e044315SEugene (jno) Dvurechenski 	mutex_unlock(&kvm->lock);
16185e044315SEugene (jno) Dvurechenski 
16195e044315SEugene (jno) Dvurechenski 	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
1620a6e2f683SEugene (jno) Dvurechenski }
1621a6e2f683SEugene (jno) Dvurechenski 
1622dafd032aSDominik Dingel int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1623dafd032aSDominik Dingel {
1624dafd032aSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1625dafd032aSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
162659674c1aSChristian Borntraeger 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
162759674c1aSChristian Borntraeger 				    KVM_SYNC_GPRS |
16289eed0735SChristian Borntraeger 				    KVM_SYNC_ACRS |
1629b028ee3eSDavid Hildenbrand 				    KVM_SYNC_CRS |
1630b028ee3eSDavid Hildenbrand 				    KVM_SYNC_ARCH0 |
1631b028ee3eSDavid Hildenbrand 				    KVM_SYNC_PFAULT;
1632c6e5f166SFan Zhang 	if (test_kvm_facility(vcpu->kvm, 64))
1633c6e5f166SFan Zhang 		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
1634f6aa6dc4SDavid Hildenbrand 	/* fprs can be synchronized via vrs, even if the guest has no vx. With
1635f6aa6dc4SDavid Hildenbrand 	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
1636f6aa6dc4SDavid Hildenbrand 	 */
1637f6aa6dc4SDavid Hildenbrand 	if (MACHINE_HAS_VX)
163868c55750SEric Farman 		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
16396fd8e67dSDavid Hildenbrand 	else
16406fd8e67dSDavid Hildenbrand 		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
1641dafd032aSDominik Dingel 
1642dafd032aSDominik Dingel 	if (kvm_is_ucontrol(vcpu->kvm))
1643dafd032aSDominik Dingel 		return __kvm_ucontrol_vcpu_init(vcpu);
1644dafd032aSDominik Dingel 
1645b0c632dbSHeiko Carstens 	return 0;
1646b0c632dbSHeiko Carstens }
1647b0c632dbSHeiko Carstens 
1648db0758b2SDavid Hildenbrand /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
1649db0758b2SDavid Hildenbrand static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1650db0758b2SDavid Hildenbrand {
1651db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
16529c23a131SDavid Hildenbrand 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
1653db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_start = get_tod_clock_fast();
16549c23a131SDavid Hildenbrand 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
1655db0758b2SDavid Hildenbrand }
1656db0758b2SDavid Hildenbrand 
1657db0758b2SDavid Hildenbrand /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
1658db0758b2SDavid Hildenbrand static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1659db0758b2SDavid Hildenbrand {
1660db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
16619c23a131SDavid Hildenbrand 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
1662db0758b2SDavid Hildenbrand 	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
1663db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_start = 0;
16649c23a131SDavid Hildenbrand 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
1665db0758b2SDavid Hildenbrand }
1666db0758b2SDavid Hildenbrand 
1667db0758b2SDavid Hildenbrand /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
1668db0758b2SDavid Hildenbrand static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1669db0758b2SDavid Hildenbrand {
1670db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
1671db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_enabled = true;
1672db0758b2SDavid Hildenbrand 	__start_cpu_timer_accounting(vcpu);
1673db0758b2SDavid Hildenbrand }
1674db0758b2SDavid Hildenbrand 
1675db0758b2SDavid Hildenbrand /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
1676db0758b2SDavid Hildenbrand static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1677db0758b2SDavid Hildenbrand {
1678db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
1679db0758b2SDavid Hildenbrand 	__stop_cpu_timer_accounting(vcpu);
1680db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_enabled = false;
1681db0758b2SDavid Hildenbrand }
1682db0758b2SDavid Hildenbrand 
1683db0758b2SDavid Hildenbrand static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1684db0758b2SDavid Hildenbrand {
1685db0758b2SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
1686db0758b2SDavid Hildenbrand 	__enable_cpu_timer_accounting(vcpu);
1687db0758b2SDavid Hildenbrand 	preempt_enable();
1688db0758b2SDavid Hildenbrand }
1689db0758b2SDavid Hildenbrand 
1690db0758b2SDavid Hildenbrand static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1691db0758b2SDavid Hildenbrand {
1692db0758b2SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
1693db0758b2SDavid Hildenbrand 	__disable_cpu_timer_accounting(vcpu);
1694db0758b2SDavid Hildenbrand 	preempt_enable();
1695db0758b2SDavid Hildenbrand }
1696db0758b2SDavid Hildenbrand 
16974287f247SDavid Hildenbrand /* set the cpu timer - may only be called from the VCPU thread itself */
16984287f247SDavid Hildenbrand void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
16994287f247SDavid Hildenbrand {
1700db0758b2SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
17019c23a131SDavid Hildenbrand 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
1702db0758b2SDavid Hildenbrand 	if (vcpu->arch.cputm_enabled)
1703db0758b2SDavid Hildenbrand 		vcpu->arch.cputm_start = get_tod_clock_fast();
17044287f247SDavid Hildenbrand 	vcpu->arch.sie_block->cputm = cputm;
17059c23a131SDavid Hildenbrand 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
1706db0758b2SDavid Hildenbrand 	preempt_enable();
17074287f247SDavid Hildenbrand }
17084287f247SDavid Hildenbrand 
1709db0758b2SDavid Hildenbrand /* update and get the cpu timer - can also be called from other VCPU threads */
17104287f247SDavid Hildenbrand __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
17114287f247SDavid Hildenbrand {
17129c23a131SDavid Hildenbrand 	unsigned int seq;
1713db0758b2SDavid Hildenbrand 	__u64 value;
1714db0758b2SDavid Hildenbrand 
1715db0758b2SDavid Hildenbrand 	if (unlikely(!vcpu->arch.cputm_enabled))
17164287f247SDavid Hildenbrand 		return vcpu->arch.sie_block->cputm;
1717db0758b2SDavid Hildenbrand 
17189c23a131SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
17199c23a131SDavid Hildenbrand 	do {
17209c23a131SDavid Hildenbrand 		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
17219c23a131SDavid Hildenbrand 		/*
17229c23a131SDavid Hildenbrand 		 * If the writer would ever execute a read in the critical
17239c23a131SDavid Hildenbrand 		 * section, e.g. in irq context, we have a deadlock.
17249c23a131SDavid Hildenbrand 		 */
17259c23a131SDavid Hildenbrand 		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
1726db0758b2SDavid Hildenbrand 		value = vcpu->arch.sie_block->cputm;
17279c23a131SDavid Hildenbrand 		/* if cputm_start is 0, accounting is being started/stopped */
17289c23a131SDavid Hildenbrand 		if (likely(vcpu->arch.cputm_start))
1729db0758b2SDavid Hildenbrand 			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
17309c23a131SDavid Hildenbrand 	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
17319c23a131SDavid Hildenbrand 	preempt_enable();
1732db0758b2SDavid Hildenbrand 	return value;
17334287f247SDavid Hildenbrand }
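/*
 * CPU timer bookkeeping: while accounting is enabled, sie_block->cputm
 * only holds the value as of cputm_start (the TOD stamp taken when
 * accounting started), so the current reading is
 * cputm - (now - cputm_start).  The vcpu thread brackets updates with
 * cputm_seqcount; readers on other CPUs retry instead of taking a
 * lock, keeping the hot vcpu_load/vcpu_put paths cheap.
 */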
17344287f247SDavid Hildenbrand 
1735b0c632dbSHeiko Carstens void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1736b0c632dbSHeiko Carstens {
17379977e886SHendrik Brueckner 	/* Save host register state */
1738d0164ee2SHendrik Brueckner 	save_fpu_regs();
17399abc2a08SDavid Hildenbrand 	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
17409abc2a08SDavid Hildenbrand 	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
174196b2d7a8SHendrik Brueckner 
17426fd8e67dSDavid Hildenbrand 	if (MACHINE_HAS_VX)
17439abc2a08SDavid Hildenbrand 		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
17446fd8e67dSDavid Hildenbrand 	else
17456fd8e67dSDavid Hildenbrand 		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
17469abc2a08SDavid Hildenbrand 	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
17479977e886SHendrik Brueckner 	if (test_fp_ctl(current->thread.fpu.fpc))
174896b2d7a8SHendrik Brueckner 		/* User space provided an invalid FPC, let's clear it */
17499977e886SHendrik Brueckner 		current->thread.fpu.fpc = 0;
17509977e886SHendrik Brueckner 
17519977e886SHendrik Brueckner 	save_access_regs(vcpu->arch.host_acrs);
175259674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
175337d9df98SDavid Hildenbrand 	gmap_enable(vcpu->arch.enabled_gmap);
1754805de8f4SPeter Zijlstra 	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
17555ebda316SDavid Hildenbrand 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
1756db0758b2SDavid Hildenbrand 		__start_cpu_timer_accounting(vcpu);
175701a745acSDavid Hildenbrand 	vcpu->cpu = cpu;
1758b0c632dbSHeiko Carstens }
1759b0c632dbSHeiko Carstens 
1760b0c632dbSHeiko Carstens void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1761b0c632dbSHeiko Carstens {
176201a745acSDavid Hildenbrand 	vcpu->cpu = -1;
17635ebda316SDavid Hildenbrand 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
1764db0758b2SDavid Hildenbrand 		__stop_cpu_timer_accounting(vcpu);
1765805de8f4SPeter Zijlstra 	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
176637d9df98SDavid Hildenbrand 	vcpu->arch.enabled_gmap = gmap_get_enabled();
176737d9df98SDavid Hildenbrand 	gmap_disable(vcpu->arch.enabled_gmap);
17689977e886SHendrik Brueckner 
17699abc2a08SDavid Hildenbrand 	/* Save guest register state */
1770d0164ee2SHendrik Brueckner 	save_fpu_regs();
17719977e886SHendrik Brueckner 	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
17729abc2a08SDavid Hildenbrand 
17739abc2a08SDavid Hildenbrand 	/* Restore host register state */
17749abc2a08SDavid Hildenbrand 	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
17759abc2a08SDavid Hildenbrand 	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
17769977e886SHendrik Brueckner 
17779977e886SHendrik Brueckner 	save_access_regs(vcpu->run->s.regs.acrs);
1778b0c632dbSHeiko Carstens 	restore_access_regs(vcpu->arch.host_acrs);
1779b0c632dbSHeiko Carstens }
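/*
 * vcpu_load/vcpu_put swap FPU and access-register state between host
 * and guest by pointing current->thread.fpu.regs at the guest register
 * image in the run struct (vector or scalar format, depending on
 * MACHINE_HAS_VX); save_fpu_regs() flushes the currently active set
 * before the pointer is switched, and an invalid guest FPC from
 * userspace is cleared rather than rejected.
 */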
1780b0c632dbSHeiko Carstens 
1781b0c632dbSHeiko Carstens static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
1782b0c632dbSHeiko Carstens {
1783b0c632dbSHeiko Carstens 	/* this equals initial cpu reset in pop, but we don't switch to ESA */
1784b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.mask = 0UL;
1785b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.addr = 0UL;
17868d26cf7bSChristian Borntraeger 	kvm_s390_set_prefix(vcpu, 0);
17874287f247SDavid Hildenbrand 	kvm_s390_set_cpu_timer(vcpu, 0);
1788b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->ckc       = 0UL;
1789b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->todpr     = 0;
1790b0c632dbSHeiko Carstens 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
1791b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
1792b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
17939abc2a08SDavid Hildenbrand 	/* make sure the new fpc will be lazily loaded */
17949abc2a08SDavid Hildenbrand 	save_fpu_regs();
17959abc2a08SDavid Hildenbrand 	current->thread.fpu.fpc = 0;
1796b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gbea = 1;
1797672550fbSChristian Borntraeger 	vcpu->arch.sie_block->pp = 0;
17983c038e6bSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
17993c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
18006352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
18016852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
18022ed10cc1SJens Freimann 	kvm_s390_clear_local_irqs(vcpu);
1803b0c632dbSHeiko Carstens }
1804b0c632dbSHeiko Carstens 
180531928aa5SDominik Dingel void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
180642897d86SMarcelo Tosatti {
180772f25020SJason J. Herne 	mutex_lock(&vcpu->kvm->lock);
1808fdf03650SFan Zhang 	preempt_disable();
180972f25020SJason J. Herne 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
1810fdf03650SFan Zhang 	preempt_enable();
181172f25020SJason J. Herne 	mutex_unlock(&vcpu->kvm->lock);
181225508824SDavid Hildenbrand 	if (!kvm_is_ucontrol(vcpu->kvm)) {
1813dafd032aSDominik Dingel 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
1814eaa78f34SDavid Hildenbrand 		sca_add_vcpu(vcpu);
181525508824SDavid Hildenbrand 	}
181637d9df98SDavid Hildenbrand 	/* make vcpu_load load the right gmap on the first trigger */
181737d9df98SDavid Hildenbrand 	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
181842897d86SMarcelo Tosatti }
181942897d86SMarcelo Tosatti 
18205102ee87STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
18215102ee87STony Krowiak {
18229d8d5786SMichael Mueller 	if (!test_kvm_facility(vcpu->kvm, 76))
18235102ee87STony Krowiak 		return;
18245102ee87STony Krowiak 
1825a374e892STony Krowiak 	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1826a374e892STony Krowiak 
1827a374e892STony Krowiak 	if (vcpu->kvm->arch.crypto.aes_kw)
1828a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1829a374e892STony Krowiak 	if (vcpu->kvm->arch.crypto.dea_kw)
1830a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1831a374e892STony Krowiak 
18325102ee87STony Krowiak 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
18335102ee87STony Krowiak }
18345102ee87STony Krowiak 
1835b31605c1SDominik Dingel void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1836b31605c1SDominik Dingel {
1837b31605c1SDominik Dingel 	free_page(vcpu->arch.sie_block->cbrlo);
1838b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = 0;
1839b31605c1SDominik Dingel }
1840b31605c1SDominik Dingel 
1841b31605c1SDominik Dingel int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1842b31605c1SDominik Dingel {
1843b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1844b31605c1SDominik Dingel 	if (!vcpu->arch.sie_block->cbrlo)
1845b31605c1SDominik Dingel 		return -ENOMEM;
1846b31605c1SDominik Dingel 
1847b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 |= 0x80;
1848b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 &= ~0x08;
1849b31605c1SDominik Dingel 	return 0;
1850b31605c1SDominik Dingel }
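/*
 * With CMMA enabled the SIE block gets a collection buffer page
 * (cbrlo); ECB2 bit 0x80 turns on CMMA interpretation, while clearing
 * bit 0x08 here seems to disable PFMF interpretation again even if
 * kvm_arch_vcpu_setup() enabled it.
 */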
1851b31605c1SDominik Dingel 
185291520f1aSMichael Mueller static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
185391520f1aSMichael Mueller {
185491520f1aSMichael Mueller 	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
185591520f1aSMichael Mueller 
185691520f1aSMichael Mueller 	vcpu->arch.sie_block->ibc = model->ibc;
185780bc79dcSDavid Hildenbrand 	if (test_kvm_facility(vcpu->kvm, 7))
1858c54f0d6aSDavid Hildenbrand 		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
185991520f1aSMichael Mueller }
186091520f1aSMichael Mueller 
1861b0c632dbSHeiko Carstens int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1862b0c632dbSHeiko Carstens {
1863b31605c1SDominik Dingel 	int rc = 0;
1864b31288faSKonstantin Weitz 
18659e6dabefSCornelia Huck 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
18669e6dabefSCornelia Huck 						    CPUSTAT_SM |
1867a4a4f191SGuenther Hutzl 						    CPUSTAT_STOPPED);
1868a4a4f191SGuenther Hutzl 
186953df84f8SGuenther Hutzl 	if (test_kvm_facility(vcpu->kvm, 78))
1870805de8f4SPeter Zijlstra 		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
187153df84f8SGuenther Hutzl 	else if (test_kvm_facility(vcpu->kvm, 8))
1872805de8f4SPeter Zijlstra 		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
1873a4a4f191SGuenther Hutzl 
187491520f1aSMichael Mueller 	kvm_s390_vcpu_setup_model(vcpu);
187591520f1aSMichael Mueller 
1876bdab09f3SDavid Hildenbrand 	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
1877bdab09f3SDavid Hildenbrand 	if (MACHINE_HAS_ESOP)
1878bdab09f3SDavid Hildenbrand 		vcpu->arch.sie_block->ecb |= 0x02;
1879bd50e8ecSDavid Hildenbrand 	if (test_kvm_facility(vcpu->kvm, 9))
1880bd50e8ecSDavid Hildenbrand 		vcpu->arch.sie_block->ecb |= 0x04;
1881f597d24eSDavid Hildenbrand 	if (test_kvm_facility(vcpu->kvm, 73))
18827feb6bb8SMichael Mueller 		vcpu->arch.sie_block->ecb |= 0x10;
18837feb6bb8SMichael Mueller 
1884873b425eSDavid Hildenbrand 	if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
1885d6af0b49SDavid Hildenbrand 		vcpu->arch.sie_block->ecb2 |= 0x08;
188648ee7d3aSDavid Hildenbrand 	vcpu->arch.sie_block->eca = 0x1002000U;
188748ee7d3aSDavid Hildenbrand 	if (sclp.has_cei)
188848ee7d3aSDavid Hildenbrand 		vcpu->arch.sie_block->eca |= 0x80000000U;
188911ad65b7SDavid Hildenbrand 	if (sclp.has_ib)
189011ad65b7SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= 0x40000000U;
189137c5f6c8SDavid Hildenbrand 	if (sclp.has_siif)
1892217a4406SHeiko Carstens 		vcpu->arch.sie_block->eca |= 1;
189337c5f6c8SDavid Hildenbrand 	if (sclp.has_sigpif)
1894ea5f4969SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= 0x10000000U;
1895c6e5f166SFan Zhang 	if (test_kvm_facility(vcpu->kvm, 64))
1896c6e5f166SFan Zhang 		vcpu->arch.sie_block->ecb3 |= 0x01;
189718280d8bSMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 129)) {
189813211ea7SEric Farman 		vcpu->arch.sie_block->eca |= 0x00020000;
189913211ea7SEric Farman 		vcpu->arch.sie_block->ecd |= 0x20000000;
190013211ea7SEric Farman 	}
1901c6e5f166SFan Zhang 	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
1902492d8642SThomas Huth 	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
190395ca2cb5SJanosch Frank 	if (test_kvm_facility(vcpu->kvm, 74))
190495ca2cb5SJanosch Frank 		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
19055a5e6536SMatthew Rosato 
1906e6db1d61SDominik Dingel 	if (vcpu->kvm->arch.use_cmma) {
1907b31605c1SDominik Dingel 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
1908b31605c1SDominik Dingel 		if (rc)
1909b31605c1SDominik Dingel 			return rc;
1910b31288faSKonstantin Weitz 	}
19110ac96cafSDavid Hildenbrand 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1912ca872302SChristian Borntraeger 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
19139d8d5786SMichael Mueller 
19145102ee87STony Krowiak 	kvm_s390_vcpu_crypto_setup(vcpu);
19155102ee87STony Krowiak 
1916b31605c1SDominik Dingel 	return rc;
1917b0c632dbSHeiko Carstens }
1918b0c632dbSHeiko Carstens 
1919b0c632dbSHeiko Carstens struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1920b0c632dbSHeiko Carstens 				      unsigned int id)
1921b0c632dbSHeiko Carstens {
19224d47555aSCarsten Otte 	struct kvm_vcpu *vcpu;
19237feb6bb8SMichael Mueller 	struct sie_page *sie_page;
19244d47555aSCarsten Otte 	int rc = -EINVAL;
1925b0c632dbSHeiko Carstens 
19264215825eSDavid Hildenbrand 	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
19274d47555aSCarsten Otte 		goto out;
19284d47555aSCarsten Otte 
19294d47555aSCarsten Otte 	rc = -ENOMEM;
19304d47555aSCarsten Otte 
1931b110feafSMichael Mueller 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
1932b0c632dbSHeiko Carstens 	if (!vcpu)
19334d47555aSCarsten Otte 		goto out;
1934b0c632dbSHeiko Carstens 
19357feb6bb8SMichael Mueller 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
19367feb6bb8SMichael Mueller 	if (!sie_page)
1937b0c632dbSHeiko Carstens 		goto out_free_cpu;
1938b0c632dbSHeiko Carstens 
19397feb6bb8SMichael Mueller 	vcpu->arch.sie_block = &sie_page->sie_block;
19407feb6bb8SMichael Mueller 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
19417feb6bb8SMichael Mueller 
1942efed1104SDavid Hildenbrand 	/* the real guest size will always be smaller than msl */
1943efed1104SDavid Hildenbrand 	vcpu->arch.sie_block->mso = 0;
1944efed1104SDavid Hildenbrand 	vcpu->arch.sie_block->msl = sclp.hamax;
1945efed1104SDavid Hildenbrand 
1946b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icpua = id;
1947ba5c1e9bSCarsten Otte 	spin_lock_init(&vcpu->arch.local_int.lock);
1948ba5c1e9bSCarsten Otte 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
1949d0321a24SChristian Borntraeger 	vcpu->arch.local_int.wq = &vcpu->wq;
19505288fbf0SChristian Borntraeger 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
19519c23a131SDavid Hildenbrand 	seqcount_init(&vcpu->arch.cputm_seqcount);
1952ba5c1e9bSCarsten Otte 
1953b0c632dbSHeiko Carstens 	rc = kvm_vcpu_init(vcpu, kvm, id);
1954b0c632dbSHeiko Carstens 	if (rc)
19559abc2a08SDavid Hildenbrand 		goto out_free_sie_block;
19568335713aSChristian Borntraeger 	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
1957b0c632dbSHeiko Carstens 		 vcpu->arch.sie_block);
1958ade38c31SCornelia Huck 	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
1959b0c632dbSHeiko Carstens 
1960b0c632dbSHeiko Carstens 	return vcpu;
19617b06bf2fSWei Yongjun out_free_sie_block:
19627b06bf2fSWei Yongjun 	free_page((unsigned long)(vcpu->arch.sie_block));
1963b0c632dbSHeiko Carstens out_free_cpu:
1964b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
19654d47555aSCarsten Otte out:
1966b0c632dbSHeiko Carstens 	return ERR_PTR(rc);
1967b0c632dbSHeiko Carstens }
1968b0c632dbSHeiko Carstens 
1969b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1970b0c632dbSHeiko Carstens {
19719a022067SDavid Hildenbrand 	return kvm_s390_vcpu_has_irq(vcpu, 0);
1972b0c632dbSHeiko Carstens }
1973b0c632dbSHeiko Carstens 
197427406cd5SChristian Borntraeger void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
197549b99e1eSChristian Borntraeger {
1976805de8f4SPeter Zijlstra 	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
197761a6df54SDavid Hildenbrand 	exit_sie(vcpu);
197849b99e1eSChristian Borntraeger }
197949b99e1eSChristian Borntraeger 
198027406cd5SChristian Borntraeger void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
198149b99e1eSChristian Borntraeger {
1982805de8f4SPeter Zijlstra 	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
198349b99e1eSChristian Borntraeger }
198449b99e1eSChristian Borntraeger 
19858e236546SChristian Borntraeger static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
19868e236546SChristian Borntraeger {
1987805de8f4SPeter Zijlstra 	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
198861a6df54SDavid Hildenbrand 	exit_sie(vcpu);
19898e236546SChristian Borntraeger }
19908e236546SChristian Borntraeger 
19918e236546SChristian Borntraeger static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
19928e236546SChristian Borntraeger {
19939bf9fde2SJason J. Herne 	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
19948e236546SChristian Borntraeger }
19958e236546SChristian Borntraeger 
199649b99e1eSChristian Borntraeger /*
199749b99e1eSChristian Borntraeger  * Kick a guest cpu out of SIE and wait until SIE is not running.
199849b99e1eSChristian Borntraeger  * If the CPU is not running (e.g. waiting as idle) the function will
199949b99e1eSChristian Borntraeger  * return immediately. */
200049b99e1eSChristian Borntraeger void exit_sie(struct kvm_vcpu *vcpu)
200149b99e1eSChristian Borntraeger {
2002805de8f4SPeter Zijlstra 	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
200349b99e1eSChristian Borntraeger 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
200449b99e1eSChristian Borntraeger 		cpu_relax();
200549b99e1eSChristian Borntraeger }
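/*
 * exit_sie() sets the STOP interrupt request in the cpuflags, which
 * forces the SIE instruction to exit, and then spins until the
 * PROG_IN_SIE marker in the SIE block is gone, i.e. until the target
 * vcpu has really left guest context.  kvm_s390_sync_request() combines
 * this with a kvm request so the vcpu processes it before re-entering
 * SIE.
 */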
200649b99e1eSChristian Borntraeger 
20078e236546SChristian Borntraeger /* Kick a guest cpu out of SIE to process a request synchronously */
20088e236546SChristian Borntraeger void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
200949b99e1eSChristian Borntraeger {
20108e236546SChristian Borntraeger 	kvm_make_request(req, vcpu);
20118e236546SChristian Borntraeger 	kvm_s390_vcpu_request(vcpu);
201249b99e1eSChristian Borntraeger }
201349b99e1eSChristian Borntraeger 
2014414d3b07SMartin Schwidefsky static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
2015414d3b07SMartin Schwidefsky 			      unsigned long end)
20162c70fe44SChristian Borntraeger {
20172c70fe44SChristian Borntraeger 	struct kvm *kvm = gmap->private;
20182c70fe44SChristian Borntraeger 	struct kvm_vcpu *vcpu;
2019414d3b07SMartin Schwidefsky 	unsigned long prefix;
2020414d3b07SMartin Schwidefsky 	int i;
20212c70fe44SChristian Borntraeger 
202265d0b0d4SDavid Hildenbrand 	if (gmap_is_shadow(gmap))
202365d0b0d4SDavid Hildenbrand 		return;
2024414d3b07SMartin Schwidefsky 	if (start >= 1UL << 31)
2025414d3b07SMartin Schwidefsky 		/* We are only interested in prefix pages */
2026414d3b07SMartin Schwidefsky 		return;
20272c70fe44SChristian Borntraeger 	kvm_for_each_vcpu(i, vcpu, kvm) {
20282c70fe44SChristian Borntraeger 		/* match against both prefix pages */
2029414d3b07SMartin Schwidefsky 		prefix = kvm_s390_get_prefix(vcpu);
2030414d3b07SMartin Schwidefsky 		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
2031414d3b07SMartin Schwidefsky 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
2032414d3b07SMartin Schwidefsky 				   start, end);
20338e236546SChristian Borntraeger 			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
20342c70fe44SChristian Borntraeger 		}
20352c70fe44SChristian Borntraeger 	}
20362c70fe44SChristian Borntraeger }
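/*
 * The gmap notifier only reacts to invalidations that could hit a vcpu
 * prefix area (the two pages at its prefix address, which must stay
 * accessible while SIE runs); a match raises KVM_REQ_MMU_RELOAD so the
 * affected vcpu remaps its prefix before the next SIE entry.
 */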
20372c70fe44SChristian Borntraeger 
2038b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
2039b6d33834SChristoffer Dall {
2040b6d33834SChristoffer Dall 	/* kvm common code refers to this, but never calls it */
2041b6d33834SChristoffer Dall 	BUG();
2042b6d33834SChristoffer Dall 	return 0;
2043b6d33834SChristoffer Dall }
2044b6d33834SChristoffer Dall 
204514eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
204614eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
204714eebd91SCarsten Otte {
204814eebd91SCarsten Otte 	int r = -EINVAL;
204914eebd91SCarsten Otte 
205014eebd91SCarsten Otte 	switch (reg->id) {
205129b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
205229b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->todpr,
205329b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
205429b7c71bSCarsten Otte 		break;
205529b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
205629b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->epoch,
205729b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
205829b7c71bSCarsten Otte 		break;
205946a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
20604287f247SDavid Hildenbrand 		r = put_user(kvm_s390_get_cpu_timer(vcpu),
206146a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
206246a6dd1cSJason J. herne 		break;
206346a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
206446a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->ckc,
206546a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
206646a6dd1cSJason J. herne 		break;
2067536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
2068536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_token,
2069536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
2070536336c2SDominik Dingel 		break;
2071536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
2072536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_compare,
2073536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
2074536336c2SDominik Dingel 		break;
2075536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
2076536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_select,
2077536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
2078536336c2SDominik Dingel 		break;
2079672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
2080672550fbSChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->pp,
2081672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
2082672550fbSChristian Borntraeger 		break;
2083afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
2084afa45ff5SChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->gbea,
2085afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
2086afa45ff5SChristian Borntraeger 		break;
208714eebd91SCarsten Otte 	default:
208814eebd91SCarsten Otte 		break;
208914eebd91SCarsten Otte 	}
209014eebd91SCarsten Otte 
209114eebd91SCarsten Otte 	return r;
209214eebd91SCarsten Otte }
209314eebd91SCarsten Otte 
209414eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
209514eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
209614eebd91SCarsten Otte {
209714eebd91SCarsten Otte 	int r = -EINVAL;
20984287f247SDavid Hildenbrand 	__u64 val;
209914eebd91SCarsten Otte 
210014eebd91SCarsten Otte 	switch (reg->id) {
210129b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
210229b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->todpr,
210329b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
210429b7c71bSCarsten Otte 		break;
210529b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
210629b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->epoch,
210729b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
210829b7c71bSCarsten Otte 		break;
210946a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
21104287f247SDavid Hildenbrand 		r = get_user(val, (u64 __user *)reg->addr);
21114287f247SDavid Hildenbrand 		if (!r)
21124287f247SDavid Hildenbrand 			kvm_s390_set_cpu_timer(vcpu, val);
211346a6dd1cSJason J. herne 		break;
211446a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
211546a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->ckc,
211646a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
211746a6dd1cSJason J. herne 		break;
2118536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
2119536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_token,
2120536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
21219fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
21229fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
2123536336c2SDominik Dingel 		break;
2124536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
2125536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_compare,
2126536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
2127536336c2SDominik Dingel 		break;
2128536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
2129536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_select,
2130536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
2131536336c2SDominik Dingel 		break;
2132672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
2133672550fbSChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->pp,
2134672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
2135672550fbSChristian Borntraeger 		break;
2136afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
2137afa45ff5SChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->gbea,
2138afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
2139afa45ff5SChristian Borntraeger 		break;
214014eebd91SCarsten Otte 	default:
214114eebd91SCarsten Otte 		break;
214214eebd91SCarsten Otte 	}
214314eebd91SCarsten Otte 
214414eebd91SCarsten Otte 	return r;
214514eebd91SCarsten Otte }
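
/*
 * Illustrative userspace sketch, not part of this source file: reading
 * and updating the CPU timer through the one_reg interface handled
 * above. The example_* helpers and "vcpu_fd" (an already-open vCPU file
 * descriptor) are assumed names for illustration only.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_get_cpu_timer(int vcpu_fd, uint64_t *value)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_S390_CPU_TIMER,
		.addr = (uint64_t)(unsigned long)value,	/* kernel put_user()s here */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	/* 0 on success */
}

static int example_set_cpu_timer(int vcpu_fd, uint64_t value)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_S390_CPU_TIMER,
		.addr = (uint64_t)(unsigned long)&value,	/* kernel get_user()s from here */
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}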
2146b6d33834SChristoffer Dall 
2147b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
2148b0c632dbSHeiko Carstens {
2149b0c632dbSHeiko Carstens 	kvm_s390_vcpu_initial_reset(vcpu);
2150b0c632dbSHeiko Carstens 	return 0;
2151b0c632dbSHeiko Carstens }
2152b0c632dbSHeiko Carstens 
2153b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2154b0c632dbSHeiko Carstens {
21555a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
2156b0c632dbSHeiko Carstens 	return 0;
2157b0c632dbSHeiko Carstens }
2158b0c632dbSHeiko Carstens 
2159b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2160b0c632dbSHeiko Carstens {
21615a32c1afSChristian Borntraeger 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
2162b0c632dbSHeiko Carstens 	return 0;
2163b0c632dbSHeiko Carstens }
2164b0c632dbSHeiko Carstens 
2165b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2166b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
2167b0c632dbSHeiko Carstens {
216859674c1aSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
2169b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
217059674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
2171b0c632dbSHeiko Carstens 	return 0;
2172b0c632dbSHeiko Carstens }
2173b0c632dbSHeiko Carstens 
2174b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2175b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
2176b0c632dbSHeiko Carstens {
217759674c1aSChristian Borntraeger 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
2178b0c632dbSHeiko Carstens 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
2179b0c632dbSHeiko Carstens 	return 0;
2180b0c632dbSHeiko Carstens }
2181b0c632dbSHeiko Carstens 
2182b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2183b0c632dbSHeiko Carstens {
21849abc2a08SDavid Hildenbrand 	/* make sure the new values will be lazily loaded */
21859abc2a08SDavid Hildenbrand 	save_fpu_regs();
21864725c860SMartin Schwidefsky 	if (test_fp_ctl(fpu->fpc))
21874725c860SMartin Schwidefsky 		return -EINVAL;
21889abc2a08SDavid Hildenbrand 	current->thread.fpu.fpc = fpu->fpc;
21899abc2a08SDavid Hildenbrand 	if (MACHINE_HAS_VX)
21909abc2a08SDavid Hildenbrand 		convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
21919abc2a08SDavid Hildenbrand 	else
21929abc2a08SDavid Hildenbrand 		memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
2193b0c632dbSHeiko Carstens 	return 0;
2194b0c632dbSHeiko Carstens }
2195b0c632dbSHeiko Carstens 
2196b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2197b0c632dbSHeiko Carstens {
21989abc2a08SDavid Hildenbrand 	/* make sure we have the latest values */
21999abc2a08SDavid Hildenbrand 	save_fpu_regs();
22009abc2a08SDavid Hildenbrand 	if (MACHINE_HAS_VX)
22019abc2a08SDavid Hildenbrand 		convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
22029abc2a08SDavid Hildenbrand 	else
22039abc2a08SDavid Hildenbrand 		memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
22049abc2a08SDavid Hildenbrand 	fpu->fpc = current->thread.fpu.fpc;
2205b0c632dbSHeiko Carstens 	return 0;
2206b0c632dbSHeiko Carstens }
2207b0c632dbSHeiko Carstens 
2208b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
2209b0c632dbSHeiko Carstens {
2210b0c632dbSHeiko Carstens 	int rc = 0;
2211b0c632dbSHeiko Carstens 
22127a42fdc2SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
2213b0c632dbSHeiko Carstens 		rc = -EBUSY;
2214d7b0b5ebSCarsten Otte 	else {
2215d7b0b5ebSCarsten Otte 		vcpu->run->psw_mask = psw.mask;
2216d7b0b5ebSCarsten Otte 		vcpu->run->psw_addr = psw.addr;
2217d7b0b5ebSCarsten Otte 	}
2218b0c632dbSHeiko Carstens 	return rc;
2219b0c632dbSHeiko Carstens }
2220b0c632dbSHeiko Carstens 
2221b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2222b0c632dbSHeiko Carstens 				  struct kvm_translation *tr)
2223b0c632dbSHeiko Carstens {
2224b0c632dbSHeiko Carstens 	return -EINVAL; /* not implemented yet */
2225b0c632dbSHeiko Carstens }
2226b0c632dbSHeiko Carstens 
222727291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
222827291e21SDavid Hildenbrand 			      KVM_GUESTDBG_USE_HW_BP | \
222927291e21SDavid Hildenbrand 			      KVM_GUESTDBG_ENABLE)
223027291e21SDavid Hildenbrand 
2231d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2232d0bfb940SJan Kiszka 					struct kvm_guest_debug *dbg)
2233b0c632dbSHeiko Carstens {
223427291e21SDavid Hildenbrand 	int rc = 0;
223527291e21SDavid Hildenbrand 
223627291e21SDavid Hildenbrand 	vcpu->guest_debug = 0;
223727291e21SDavid Hildenbrand 	kvm_s390_clear_bp_data(vcpu);
223827291e21SDavid Hildenbrand 
22392de3bfc2SDavid Hildenbrand 	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
224027291e21SDavid Hildenbrand 		return -EINVAL;
224189b5b4deSDavid Hildenbrand 	if (!sclp.has_gpere)
224289b5b4deSDavid Hildenbrand 		return -EINVAL;
224327291e21SDavid Hildenbrand 
224427291e21SDavid Hildenbrand 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
224527291e21SDavid Hildenbrand 		vcpu->guest_debug = dbg->control;
224627291e21SDavid Hildenbrand 		/* enforce guest PER */
2247805de8f4SPeter Zijlstra 		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
224827291e21SDavid Hildenbrand 
224927291e21SDavid Hildenbrand 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
225027291e21SDavid Hildenbrand 			rc = kvm_s390_import_bp_data(vcpu, dbg);
225127291e21SDavid Hildenbrand 	} else {
2252805de8f4SPeter Zijlstra 		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
225327291e21SDavid Hildenbrand 		vcpu->arch.guestdbg.last_bp = 0;
225427291e21SDavid Hildenbrand 	}
225527291e21SDavid Hildenbrand 
225627291e21SDavid Hildenbrand 	if (rc) {
225727291e21SDavid Hildenbrand 		vcpu->guest_debug = 0;
225827291e21SDavid Hildenbrand 		kvm_s390_clear_bp_data(vcpu);
2259805de8f4SPeter Zijlstra 		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
226027291e21SDavid Hildenbrand 	}
226127291e21SDavid Hildenbrand 
226227291e21SDavid Hildenbrand 	return rc;
2263b0c632dbSHeiko Carstens }
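
/*
 * Illustrative userspace sketch, not part of this source file: enabling
 * guest debugging via the ioctl handled above. KVM_GUESTDBG_ENABLE is
 * mandatory and enforces guest PER, as seen in the handler; the call
 * fails if the machine has no GPERE support. "vcpu_fd" is an assumed,
 * already-open vCPU file descriptor.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_enable_singlestep(int vcpu_fd)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}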
2264b0c632dbSHeiko Carstens 
226562d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
226662d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
226762d9f0dbSMarcelo Tosatti {
22686352e4d2SDavid Hildenbrand 	/* CHECK_STOP and LOAD are not supported yet */
22696352e4d2SDavid Hildenbrand 	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
22706352e4d2SDavid Hildenbrand 				       KVM_MP_STATE_OPERATING;
227162d9f0dbSMarcelo Tosatti }
227262d9f0dbSMarcelo Tosatti 
227362d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
227462d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
227562d9f0dbSMarcelo Tosatti {
22766352e4d2SDavid Hildenbrand 	int rc = 0;
22776352e4d2SDavid Hildenbrand 
22786352e4d2SDavid Hildenbrand 	/* user space knows about this interface - let it control the state */
22796352e4d2SDavid Hildenbrand 	vcpu->kvm->arch.user_cpu_state_ctrl = 1;
22806352e4d2SDavid Hildenbrand 
22816352e4d2SDavid Hildenbrand 	switch (mp_state->mp_state) {
22826352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_STOPPED:
22836352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
22846352e4d2SDavid Hildenbrand 		break;
22856352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_OPERATING:
22866352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
22876352e4d2SDavid Hildenbrand 		break;
22886352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_LOAD:
22896352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_CHECK_STOP:
22906352e4d2SDavid Hildenbrand 		/* fall through - CHECK_STOP and LOAD are not supported yet */
22916352e4d2SDavid Hildenbrand 	default:
22926352e4d2SDavid Hildenbrand 		rc = -ENXIO;
22936352e4d2SDavid Hildenbrand 	}
22946352e4d2SDavid Hildenbrand 
22956352e4d2SDavid Hildenbrand 	return rc;
229662d9f0dbSMarcelo Tosatti }
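
/*
 * Illustrative userspace sketch, not part of this source file: stopping
 * or restarting a vCPU through the mp_state interface above. Note that
 * the first KVM_SET_MP_STATE call flips the VM to user-controlled CPU
 * state handling. "vcpu_fd" is an assumed, already-open vCPU file
 * descriptor.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_set_vcpu_stopped(int vcpu_fd, int stopped)
{
	struct kvm_mp_state mp_state = {
		.mp_state = stopped ? KVM_MP_STATE_STOPPED
				    : KVM_MP_STATE_OPERATING,
	};

	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp_state);
}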
229762d9f0dbSMarcelo Tosatti 
22988ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu)
22998ad35755SDavid Hildenbrand {
23008ad35755SDavid Hildenbrand 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
23018ad35755SDavid Hildenbrand }
23028ad35755SDavid Hildenbrand 
23032c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
23042c70fe44SChristian Borntraeger {
23058ad35755SDavid Hildenbrand retry:
23068e236546SChristian Borntraeger 	kvm_s390_vcpu_request_handled(vcpu);
2307586b7ccdSChristian Borntraeger 	if (!vcpu->requests)
2308586b7ccdSChristian Borntraeger 		return 0;
23092c70fe44SChristian Borntraeger 	/*
23102c70fe44SChristian Borntraeger 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
2311b2d73b2aSMartin Schwidefsky 	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
23122c70fe44SChristian Borntraeger 	 * This ensures that the ipte instruction for this request has
23132c70fe44SChristian Borntraeger 	 * already finished. We might race against a second unmapper that
23142c70fe44SChristian Borntraeger 	 * wants to set the blocking bit. Let's just retry the request loop.
23152c70fe44SChristian Borntraeger 	 */
23168ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
23172c70fe44SChristian Borntraeger 		int rc;
2318b2d73b2aSMartin Schwidefsky 		rc = gmap_mprotect_notify(vcpu->arch.gmap,
2319fda902cbSMichael Mueller 					  kvm_s390_get_prefix(vcpu),
2320b2d73b2aSMartin Schwidefsky 					  PAGE_SIZE * 2, PROT_WRITE);
23212c70fe44SChristian Borntraeger 		if (rc)
23222c70fe44SChristian Borntraeger 			return rc;
23238ad35755SDavid Hildenbrand 		goto retry;
23242c70fe44SChristian Borntraeger 	}
23258ad35755SDavid Hildenbrand 
2326d3d692c8SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2327d3d692c8SDavid Hildenbrand 		vcpu->arch.sie_block->ihcpu = 0xffff;
2328d3d692c8SDavid Hildenbrand 		goto retry;
2329d3d692c8SDavid Hildenbrand 	}
2330d3d692c8SDavid Hildenbrand 
23318ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
23328ad35755SDavid Hildenbrand 		if (!ibs_enabled(vcpu)) {
23338ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
2334805de8f4SPeter Zijlstra 			atomic_or(CPUSTAT_IBS,
23358ad35755SDavid Hildenbrand 					&vcpu->arch.sie_block->cpuflags);
23368ad35755SDavid Hildenbrand 		}
23378ad35755SDavid Hildenbrand 		goto retry;
23388ad35755SDavid Hildenbrand 	}
23398ad35755SDavid Hildenbrand 
23408ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
23418ad35755SDavid Hildenbrand 		if (ibs_enabled(vcpu)) {
23428ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
2343805de8f4SPeter Zijlstra 			atomic_andnot(CPUSTAT_IBS,
23448ad35755SDavid Hildenbrand 					  &vcpu->arch.sie_block->cpuflags);
23458ad35755SDavid Hildenbrand 		}
23468ad35755SDavid Hildenbrand 		goto retry;
23478ad35755SDavid Hildenbrand 	}
23488ad35755SDavid Hildenbrand 
23490759d068SDavid Hildenbrand 	/* nothing to do, just clear the request */
23500759d068SDavid Hildenbrand 	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
23510759d068SDavid Hildenbrand 
23522c70fe44SChristian Borntraeger 	return 0;
23532c70fe44SChristian Borntraeger }
23542c70fe44SChristian Borntraeger 
235525ed1675SDavid Hildenbrand void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
235625ed1675SDavid Hildenbrand {
235725ed1675SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
235825ed1675SDavid Hildenbrand 	int i;
235925ed1675SDavid Hildenbrand 
236025ed1675SDavid Hildenbrand 	mutex_lock(&kvm->lock);
236125ed1675SDavid Hildenbrand 	preempt_disable();
236225ed1675SDavid Hildenbrand 	kvm->arch.epoch = tod - get_tod_clock();
236325ed1675SDavid Hildenbrand 	kvm_s390_vcpu_block_all(kvm);
236425ed1675SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm)
236525ed1675SDavid Hildenbrand 		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
236625ed1675SDavid Hildenbrand 	kvm_s390_vcpu_unblock_all(kvm);
236725ed1675SDavid Hildenbrand 	preempt_enable();
236825ed1675SDavid Hildenbrand 	mutex_unlock(&kvm->lock);
236925ed1675SDavid Hildenbrand }
237025ed1675SDavid Hildenbrand 
2371fa576c58SThomas Huth /**
2372fa576c58SThomas Huth  * kvm_arch_fault_in_page - fault-in guest page if necessary
2373fa576c58SThomas Huth  * @vcpu: The corresponding virtual cpu
2374fa576c58SThomas Huth  * @gpa: Guest physical address
2375fa576c58SThomas Huth  * @writable: Whether the page should be writable or not
2376fa576c58SThomas Huth  *
2377fa576c58SThomas Huth  * Make sure that a guest page has been faulted-in on the host.
2378fa576c58SThomas Huth  *
2379fa576c58SThomas Huth  * Return: Zero on success, negative error code otherwise.
2380fa576c58SThomas Huth  */
2381fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
238224eb3a82SDominik Dingel {
2383527e30b4SMartin Schwidefsky 	return gmap_fault(vcpu->arch.gmap, gpa,
2384527e30b4SMartin Schwidefsky 			  writable ? FAULT_FLAG_WRITE : 0);
238524eb3a82SDominik Dingel }
238624eb3a82SDominik Dingel 
23873c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
23883c038e6bSDominik Dingel 				      unsigned long token)
23893c038e6bSDominik Dingel {
23903c038e6bSDominik Dingel 	struct kvm_s390_interrupt inti;
2391383d0b05SJens Freimann 	struct kvm_s390_irq irq;
23923c038e6bSDominik Dingel 
23933c038e6bSDominik Dingel 	if (start_token) {
2394383d0b05SJens Freimann 		irq.u.ext.ext_params2 = token;
2395383d0b05SJens Freimann 		irq.type = KVM_S390_INT_PFAULT_INIT;
2396383d0b05SJens Freimann 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
23973c038e6bSDominik Dingel 	} else {
23983c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_DONE;
2399383d0b05SJens Freimann 		inti.parm64 = token;
24003c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
24013c038e6bSDominik Dingel 	}
24023c038e6bSDominik Dingel }
24033c038e6bSDominik Dingel 
24043c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
24053c038e6bSDominik Dingel 				     struct kvm_async_pf *work)
24063c038e6bSDominik Dingel {
24073c038e6bSDominik Dingel 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
24083c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
24093c038e6bSDominik Dingel }
24103c038e6bSDominik Dingel 
24113c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
24123c038e6bSDominik Dingel 				 struct kvm_async_pf *work)
24133c038e6bSDominik Dingel {
24143c038e6bSDominik Dingel 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
24153c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
24163c038e6bSDominik Dingel }
24173c038e6bSDominik Dingel 
24183c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
24193c038e6bSDominik Dingel 			       struct kvm_async_pf *work)
24203c038e6bSDominik Dingel {
24213c038e6bSDominik Dingel 	/* s390 will always inject the page directly */
24223c038e6bSDominik Dingel }
24233c038e6bSDominik Dingel 
24243c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
24253c038e6bSDominik Dingel {
24263c038e6bSDominik Dingel 	/*
24273c038e6bSDominik Dingel 	 * s390 will always inject the page directly,
24283c038e6bSDominik Dingel 	 * but we still want check_async_completion to clean up
24293c038e6bSDominik Dingel 	 */
24303c038e6bSDominik Dingel 	return true;
24313c038e6bSDominik Dingel }
24323c038e6bSDominik Dingel 
24333c038e6bSDominik Dingel static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
24343c038e6bSDominik Dingel {
24353c038e6bSDominik Dingel 	hva_t hva;
24363c038e6bSDominik Dingel 	struct kvm_arch_async_pf arch;
24373c038e6bSDominik Dingel 	int rc;
24383c038e6bSDominik Dingel 
24393c038e6bSDominik Dingel 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
24403c038e6bSDominik Dingel 		return 0;
24413c038e6bSDominik Dingel 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
24423c038e6bSDominik Dingel 	    vcpu->arch.pfault_compare)
24433c038e6bSDominik Dingel 		return 0;
24443c038e6bSDominik Dingel 	if (psw_extint_disabled(vcpu))
24453c038e6bSDominik Dingel 		return 0;
24469a022067SDavid Hildenbrand 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
24473c038e6bSDominik Dingel 		return 0;
24483c038e6bSDominik Dingel 	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
24493c038e6bSDominik Dingel 		return 0;
24503c038e6bSDominik Dingel 	if (!vcpu->arch.gmap->pfault_enabled)
24513c038e6bSDominik Dingel 		return 0;
24523c038e6bSDominik Dingel 
245381480cc1SHeiko Carstens 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
245481480cc1SHeiko Carstens 	hva += current->thread.gmap_addr & ~PAGE_MASK;
245581480cc1SHeiko Carstens 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
24563c038e6bSDominik Dingel 		return 0;
24573c038e6bSDominik Dingel 
24583c038e6bSDominik Dingel 	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
24593c038e6bSDominik Dingel 	return rc;
24603c038e6bSDominik Dingel }
24613c038e6bSDominik Dingel 
24623fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu)
2463b0c632dbSHeiko Carstens {
24643fb4c40fSThomas Huth 	int rc, cpuflags;
2465e168bf8dSCarsten Otte 
24663c038e6bSDominik Dingel 	/*
24673c038e6bSDominik Dingel 	 * On s390 notifications for arriving pages will be delivered directly
24683c038e6bSDominik Dingel 	 * to the guest but the housekeeping for completed pfaults is
24693c038e6bSDominik Dingel 	 * handled outside the worker.
24703c038e6bSDominik Dingel 	 */
24713c038e6bSDominik Dingel 	kvm_check_async_pf_completion(vcpu);
24723c038e6bSDominik Dingel 
24737ec7c8c7SChristian Borntraeger 	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
24747ec7c8c7SChristian Borntraeger 	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
2475b0c632dbSHeiko Carstens 
2476b0c632dbSHeiko Carstens 	if (need_resched())
2477b0c632dbSHeiko Carstens 		schedule();
2478b0c632dbSHeiko Carstens 
2479d3a73acbSMartin Schwidefsky 	if (test_cpu_flag(CIF_MCCK_PENDING))
248071cde587SChristian Borntraeger 		s390_handle_mcck();
248171cde587SChristian Borntraeger 
248279395031SJens Freimann 	if (!kvm_is_ucontrol(vcpu->kvm)) {
248379395031SJens Freimann 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
248479395031SJens Freimann 		if (rc)
248579395031SJens Freimann 			return rc;
248679395031SJens Freimann 	}
24870ff31867SCarsten Otte 
24882c70fe44SChristian Borntraeger 	rc = kvm_s390_handle_requests(vcpu);
24892c70fe44SChristian Borntraeger 	if (rc)
24902c70fe44SChristian Borntraeger 		return rc;
24912c70fe44SChristian Borntraeger 
249227291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu)) {
249327291e21SDavid Hildenbrand 		kvm_s390_backup_guest_per_regs(vcpu);
249427291e21SDavid Hildenbrand 		kvm_s390_patch_guest_per_regs(vcpu);
249527291e21SDavid Hildenbrand 	}
249627291e21SDavid Hildenbrand 
2497b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icptcode = 0;
24983fb4c40fSThomas Huth 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
24993fb4c40fSThomas Huth 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
25003fb4c40fSThomas Huth 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
25012b29a9fdSDominik Dingel 
25023fb4c40fSThomas Huth 	return 0;
25033fb4c40fSThomas Huth }
25043fb4c40fSThomas Huth 
2505492d8642SThomas Huth static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
2506492d8642SThomas Huth {
250756317920SDavid Hildenbrand 	struct kvm_s390_pgm_info pgm_info = {
250856317920SDavid Hildenbrand 		.code = PGM_ADDRESSING,
250956317920SDavid Hildenbrand 	};
251056317920SDavid Hildenbrand 	u8 opcode, ilen;
2511492d8642SThomas Huth 	int rc;
2512492d8642SThomas Huth 
2513492d8642SThomas Huth 	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
2514492d8642SThomas Huth 	trace_kvm_s390_sie_fault(vcpu);
2515492d8642SThomas Huth 
2516492d8642SThomas Huth 	/*
2517492d8642SThomas Huth 	 * We want to inject an addressing exception, which is defined as a
2518492d8642SThomas Huth 	 * suppressing or terminating exception. However, since we came here
2519492d8642SThomas Huth 	 * by a DAT access exception, the PSW still points to the faulting
2520492d8642SThomas Huth 	 * instruction since DAT exceptions are nullifying. So we've got
2521492d8642SThomas Huth 	 * to look up the current opcode to get the length of the instruction
2522492d8642SThomas Huth 	 * to be able to forward the PSW.
2523492d8642SThomas Huth 	 */
252465977322SDavid Hildenbrand 	rc = read_guest_instr(vcpu, &opcode, 1);
252556317920SDavid Hildenbrand 	ilen = insn_length(opcode);
25269b0d721aSDavid Hildenbrand 	if (rc < 0) {
25279b0d721aSDavid Hildenbrand 		return rc;
25289b0d721aSDavid Hildenbrand 	} else if (rc) {
25299b0d721aSDavid Hildenbrand 		/* Instruction-Fetching Exceptions - we can't detect the ilen.
25309b0d721aSDavid Hildenbrand 		 * Forward by arbitrary ilc, injection will take care of
25319b0d721aSDavid Hildenbrand 		 * nullification if necessary.
25329b0d721aSDavid Hildenbrand 		 */
25339b0d721aSDavid Hildenbrand 		pgm_info = vcpu->arch.pgm;
25349b0d721aSDavid Hildenbrand 		ilen = 4;
25359b0d721aSDavid Hildenbrand 	}
253656317920SDavid Hildenbrand 	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
253756317920SDavid Hildenbrand 	kvm_s390_forward_psw(vcpu, ilen);
253856317920SDavid Hildenbrand 	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
2539492d8642SThomas Huth }
2540492d8642SThomas Huth 
25413fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
25423fb4c40fSThomas Huth {
25432b29a9fdSDominik Dingel 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
25442b29a9fdSDominik Dingel 		   vcpu->arch.sie_block->icptcode);
25452b29a9fdSDominik Dingel 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
25462b29a9fdSDominik Dingel 
254727291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu))
254827291e21SDavid Hildenbrand 		kvm_s390_restore_guest_per_regs(vcpu);
254927291e21SDavid Hildenbrand 
25507ec7c8c7SChristian Borntraeger 	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
25517ec7c8c7SChristian Borntraeger 	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
255271f116bfSDavid Hildenbrand 
255371f116bfSDavid Hildenbrand 	if (vcpu->arch.sie_block->icptcode > 0) {
255471f116bfSDavid Hildenbrand 		int rc = kvm_handle_sie_intercept(vcpu);
255571f116bfSDavid Hildenbrand 
255671f116bfSDavid Hildenbrand 		if (rc != -EOPNOTSUPP)
255771f116bfSDavid Hildenbrand 			return rc;
255871f116bfSDavid Hildenbrand 		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
255971f116bfSDavid Hildenbrand 		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
256071f116bfSDavid Hildenbrand 		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
256171f116bfSDavid Hildenbrand 		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
256271f116bfSDavid Hildenbrand 		return -EREMOTE;
256371f116bfSDavid Hildenbrand 	} else if (exit_reason != -EFAULT) {
256471f116bfSDavid Hildenbrand 		vcpu->stat.exit_null++;
256571f116bfSDavid Hildenbrand 		return 0;
2566210b1607SThomas Huth 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
2567210b1607SThomas Huth 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2568210b1607SThomas Huth 		vcpu->run->s390_ucontrol.trans_exc_code =
2569210b1607SThomas Huth 						current->thread.gmap_addr;
2570210b1607SThomas Huth 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
257171f116bfSDavid Hildenbrand 		return -EREMOTE;
257224eb3a82SDominik Dingel 	} else if (current->thread.gmap_pfault) {
25733c038e6bSDominik Dingel 		trace_kvm_s390_major_guest_pfault(vcpu);
257424eb3a82SDominik Dingel 		current->thread.gmap_pfault = 0;
257571f116bfSDavid Hildenbrand 		if (kvm_arch_setup_async_pf(vcpu))
257671f116bfSDavid Hildenbrand 			return 0;
257771f116bfSDavid Hildenbrand 		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
2578fa576c58SThomas Huth 	}
257971f116bfSDavid Hildenbrand 	return vcpu_post_run_fault_in_sie(vcpu);
25803fb4c40fSThomas Huth }
25813fb4c40fSThomas Huth 
25823fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu)
25833fb4c40fSThomas Huth {
25843fb4c40fSThomas Huth 	int rc, exit_reason;
25853fb4c40fSThomas Huth 
2586800c1065SThomas Huth 	/*
2587800c1065SThomas Huth 	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
2588800c1065SThomas Huth 	 * ning the guest), so that memslots (and other stuff) are protected
2589800c1065SThomas Huth 	 */
2590800c1065SThomas Huth 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2591800c1065SThomas Huth 
2592a76ccff6SThomas Huth 	do {
25933fb4c40fSThomas Huth 		rc = vcpu_pre_run(vcpu);
25943fb4c40fSThomas Huth 		if (rc)
2595a76ccff6SThomas Huth 			break;
25963fb4c40fSThomas Huth 
2597800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
25983fb4c40fSThomas Huth 		/*
2599a76ccff6SThomas Huth 		 * As PF_VCPU will be used in the fault handler, there
2600a76ccff6SThomas Huth 		 * should be no uaccess between guest_enter and guest_exit.
26013fb4c40fSThomas Huth 		 */
26020097d12eSChristian Borntraeger 		local_irq_disable();
26030097d12eSChristian Borntraeger 		__kvm_guest_enter();
2604db0758b2SDavid Hildenbrand 		__disable_cpu_timer_accounting(vcpu);
26050097d12eSChristian Borntraeger 		local_irq_enable();
2606a76ccff6SThomas Huth 		exit_reason = sie64a(vcpu->arch.sie_block,
2607a76ccff6SThomas Huth 				     vcpu->run->s.regs.gprs);
26080097d12eSChristian Borntraeger 		local_irq_disable();
2609db0758b2SDavid Hildenbrand 		__enable_cpu_timer_accounting(vcpu);
26100097d12eSChristian Borntraeger 		__kvm_guest_exit();
26110097d12eSChristian Borntraeger 		local_irq_enable();
2612800c1065SThomas Huth 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
26133fb4c40fSThomas Huth 
26143fb4c40fSThomas Huth 		rc = vcpu_post_run(vcpu, exit_reason);
261527291e21SDavid Hildenbrand 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
26163fb4c40fSThomas Huth 
2617800c1065SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
2618e168bf8dSCarsten Otte 	return rc;
2619b0c632dbSHeiko Carstens }
2620b0c632dbSHeiko Carstens 
2621b028ee3eSDavid Hildenbrand static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2622b028ee3eSDavid Hildenbrand {
2623b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2624b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2625b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2626b028ee3eSDavid Hildenbrand 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2627b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2628b028ee3eSDavid Hildenbrand 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
2629d3d692c8SDavid Hildenbrand 		/* some control register changes require a tlb flush */
2630d3d692c8SDavid Hildenbrand 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2631b028ee3eSDavid Hildenbrand 	}
2632b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
26334287f247SDavid Hildenbrand 		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
2634b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2635b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2636b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2637b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2638b028ee3eSDavid Hildenbrand 	}
2639b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2640b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2641b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2642b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
26439fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
26449fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
2645b028ee3eSDavid Hildenbrand 	}
2646b028ee3eSDavid Hildenbrand 	kvm_run->kvm_dirty_regs = 0;
2647b028ee3eSDavid Hildenbrand }
2648b028ee3eSDavid Hildenbrand 
2649b028ee3eSDavid Hildenbrand static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2650b028ee3eSDavid Hildenbrand {
2651b028ee3eSDavid Hildenbrand 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2652b028ee3eSDavid Hildenbrand 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2653b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2654b028ee3eSDavid Hildenbrand 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
26554287f247SDavid Hildenbrand 	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
2656b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2657b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2658b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2659b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2660b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2661b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2662b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2663b028ee3eSDavid Hildenbrand }
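
/*
 * Illustrative userspace sketch, not part of this source file: updating
 * the guest prefix through the shared kvm_run area that sync_regs()
 * above consumes on the next KVM_RUN. "run" is assumed to point at the
 * mmap()ed kvm_run structure of the vCPU.
 */
#include <linux/kvm.h>

static void example_set_prefix(struct kvm_run *run, __u64 new_prefix)
{
	run->s.regs.prefix = new_prefix;
	/* tell sync_regs() which parts of the sync area are valid */
	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
}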
2664b028ee3eSDavid Hildenbrand 
2665b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2666b0c632dbSHeiko Carstens {
26678f2abe6aSChristian Borntraeger 	int rc;
2668b0c632dbSHeiko Carstens 	sigset_t sigsaved;
2669b0c632dbSHeiko Carstens 
267027291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu)) {
267127291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
267227291e21SDavid Hildenbrand 		return 0;
267327291e21SDavid Hildenbrand 	}
267427291e21SDavid Hildenbrand 
2675b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
2676b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2677b0c632dbSHeiko Carstens 
26786352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
26796852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
26806352e4d2SDavid Hildenbrand 	} else if (is_vcpu_stopped(vcpu)) {
2681ea2cdd27SDavid Hildenbrand 		pr_err_ratelimited("can't run stopped vcpu %d\n",
26826352e4d2SDavid Hildenbrand 				   vcpu->vcpu_id);
26836352e4d2SDavid Hildenbrand 		return -EINVAL;
26846352e4d2SDavid Hildenbrand 	}
2685b0c632dbSHeiko Carstens 
2686b028ee3eSDavid Hildenbrand 	sync_regs(vcpu, kvm_run);
2687db0758b2SDavid Hildenbrand 	enable_cpu_timer_accounting(vcpu);
2688d7b0b5ebSCarsten Otte 
2689dab4079dSHeiko Carstens 	might_fault();
2690e168bf8dSCarsten Otte 	rc = __vcpu_run(vcpu);
26919ace903dSChristian Ehrhardt 
2692b1d16c49SChristian Ehrhardt 	if (signal_pending(current) && !rc) {
2693b1d16c49SChristian Ehrhardt 		kvm_run->exit_reason = KVM_EXIT_INTR;
26948f2abe6aSChristian Borntraeger 		rc = -EINTR;
2695b1d16c49SChristian Ehrhardt 	}
26968f2abe6aSChristian Borntraeger 
269727291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu) && !rc)  {
269827291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
269927291e21SDavid Hildenbrand 		rc = 0;
270027291e21SDavid Hildenbrand 	}
270127291e21SDavid Hildenbrand 
27028f2abe6aSChristian Borntraeger 	if (rc == -EREMOTE) {
270371f116bfSDavid Hildenbrand 		/* userspace support is needed, kvm_run has been prepared */
27048f2abe6aSChristian Borntraeger 		rc = 0;
27058f2abe6aSChristian Borntraeger 	}
27068f2abe6aSChristian Borntraeger 
2707db0758b2SDavid Hildenbrand 	disable_cpu_timer_accounting(vcpu);
2708b028ee3eSDavid Hildenbrand 	store_regs(vcpu, kvm_run);
2709d7b0b5ebSCarsten Otte 
2710b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
2711b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2712b0c632dbSHeiko Carstens 
2713b0c632dbSHeiko Carstens 	vcpu->stat.exit_userspace++;
27147e8e6ab4SHeiko Carstens 	return rc;
2715b0c632dbSHeiko Carstens }
2716b0c632dbSHeiko Carstens 
2717b0c632dbSHeiko Carstens /*
2718b0c632dbSHeiko Carstens  * store status at address
2719b0c632dbSHeiko Carstens  * we have two special cases:
2720b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2721b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2722b0c632dbSHeiko Carstens  */
2723d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
2724b0c632dbSHeiko Carstens {
2725092670cdSCarsten Otte 	unsigned char archmode = 1;
27269abc2a08SDavid Hildenbrand 	freg_t fprs[NUM_FPRS];
2727fda902cbSMichael Mueller 	unsigned int px;
27284287f247SDavid Hildenbrand 	u64 clkcomp, cputm;
2729d0bce605SHeiko Carstens 	int rc;
2730b0c632dbSHeiko Carstens 
2731d9a3a09aSMartin Schwidefsky 	px = kvm_s390_get_prefix(vcpu);
2732d0bce605SHeiko Carstens 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2733d0bce605SHeiko Carstens 		if (write_guest_abs(vcpu, 163, &archmode, 1))
2734b0c632dbSHeiko Carstens 			return -EFAULT;
2735d9a3a09aSMartin Schwidefsky 		gpa = 0;
2736d0bce605SHeiko Carstens 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2737d0bce605SHeiko Carstens 		if (write_guest_real(vcpu, 163, &archmode, 1))
2738b0c632dbSHeiko Carstens 			return -EFAULT;
2739d9a3a09aSMartin Schwidefsky 		gpa = px;
2740d9a3a09aSMartin Schwidefsky 	} else
2741d9a3a09aSMartin Schwidefsky 		gpa -= __LC_FPREGS_SAVE_AREA;
27429abc2a08SDavid Hildenbrand 
27439abc2a08SDavid Hildenbrand 	/* manually convert vector registers if necessary */
27449abc2a08SDavid Hildenbrand 	if (MACHINE_HAS_VX) {
27459522b37fSDavid Hildenbrand 		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
2746d9a3a09aSMartin Schwidefsky 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
27479abc2a08SDavid Hildenbrand 				     fprs, 128);
27489abc2a08SDavid Hildenbrand 	} else {
27499abc2a08SDavid Hildenbrand 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
27506fd8e67dSDavid Hildenbrand 				     vcpu->run->s.regs.fprs, 128);
27519abc2a08SDavid Hildenbrand 	}
2752d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
2753d0bce605SHeiko Carstens 			      vcpu->run->s.regs.gprs, 128);
2754d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
2755d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gpsw, 16);
2756d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
2757fda902cbSMichael Mueller 			      &px, 4);
2758d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
27599abc2a08SDavid Hildenbrand 			      &vcpu->run->s.regs.fpc, 4);
2760d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
2761d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->todpr, 4);
27624287f247SDavid Hildenbrand 	cputm = kvm_s390_get_cpu_timer(vcpu);
2763d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
27644287f247SDavid Hildenbrand 			      &cputm, 8);
2765178bd789SThomas Huth 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
2766d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
2767d0bce605SHeiko Carstens 			      &clkcomp, 8);
2768d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
2769d0bce605SHeiko Carstens 			      &vcpu->run->s.regs.acrs, 64);
2770d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
2771d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gcr, 128);
2772d0bce605SHeiko Carstens 	return rc ? -EFAULT : 0;
2773b0c632dbSHeiko Carstens }
2774b0c632dbSHeiko Carstens 
2775e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2776e879892cSThomas Huth {
2777e879892cSThomas Huth 	/*
2778e879892cSThomas Huth 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2779e879892cSThomas Huth 	 * copying in vcpu load/put. Let's update our copies before we save
2780e879892cSThomas Huth 	 * them into the save area.
2781e879892cSThomas Huth 	 */
2782d0164ee2SHendrik Brueckner 	save_fpu_regs();
27839abc2a08SDavid Hildenbrand 	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
2784e879892cSThomas Huth 	save_access_regs(vcpu->run->s.regs.acrs);
2785e879892cSThomas Huth 
2786e879892cSThomas Huth 	return kvm_s390_store_status_unloaded(vcpu, addr);
2787e879892cSThomas Huth }
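
/*
 * Illustrative userspace sketch, not part of this source file: asking
 * KVM to store the architected status of a (typically stopped) vCPU at
 * its prefix area, using the special-case constants documented above.
 * "vcpu_fd" is an assumed, already-open vCPU file descriptor.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_store_status_at_prefix(int vcpu_fd)
{
	/* KVM_S390_STORE_STATUS_NOADDR would store at absolute 0x1200 instead */
	return ioctl(vcpu_fd, KVM_S390_STORE_STATUS,
		     KVM_S390_STORE_STATUS_PREFIXED);
}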
2788e879892cSThomas Huth 
2789bc17de7cSEric Farman /*
2790bc17de7cSEric Farman  * store additional status at address
2791bc17de7cSEric Farman  */
2792bc17de7cSEric Farman int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2793bc17de7cSEric Farman 					unsigned long gpa)
2794bc17de7cSEric Farman {
2795bc17de7cSEric Farman 	/* Only bits 0-53 are used for address formation */
2796bc17de7cSEric Farman 	if (!(gpa & ~0x3ff))
2797bc17de7cSEric Farman 		return 0;
2798bc17de7cSEric Farman 
2799bc17de7cSEric Farman 	return write_guest_abs(vcpu, gpa & ~0x3ff,
2800bc17de7cSEric Farman 			       (void *)&vcpu->run->s.regs.vrs, 512);
2801bc17de7cSEric Farman }
2802bc17de7cSEric Farman 
2803bc17de7cSEric Farman int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2804bc17de7cSEric Farman {
2805bc17de7cSEric Farman 	if (!test_kvm_facility(vcpu->kvm, 129))
2806bc17de7cSEric Farman 		return 0;
2807bc17de7cSEric Farman 
2808bc17de7cSEric Farman 	/*
2809bc17de7cSEric Farman 	 * The guest VXRS are in the host VXRS due to the lazy
28109977e886SHendrik Brueckner 	 * copying in vcpu load/put. We can simply call save_fpu_regs()
28119977e886SHendrik Brueckner 	 * to save the current register state because we are in the
28129977e886SHendrik Brueckner 	 * middle of a load/put cycle.
28139977e886SHendrik Brueckner 	 *
28149977e886SHendrik Brueckner 	 * Let's update our copies before we save it into the save area.
2815bc17de7cSEric Farman 	 */
2816d0164ee2SHendrik Brueckner 	save_fpu_regs();
2817bc17de7cSEric Farman 
2818bc17de7cSEric Farman 	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2819bc17de7cSEric Farman }
2820bc17de7cSEric Farman 
28218ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
28228ad35755SDavid Hildenbrand {
28238ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
28248e236546SChristian Borntraeger 	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
28258ad35755SDavid Hildenbrand }
28268ad35755SDavid Hildenbrand 
28278ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
28288ad35755SDavid Hildenbrand {
28298ad35755SDavid Hildenbrand 	unsigned int i;
28308ad35755SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
28318ad35755SDavid Hildenbrand 
28328ad35755SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
28338ad35755SDavid Hildenbrand 		__disable_ibs_on_vcpu(vcpu);
28348ad35755SDavid Hildenbrand 	}
28358ad35755SDavid Hildenbrand }
28368ad35755SDavid Hildenbrand 
28378ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
28388ad35755SDavid Hildenbrand {
283909a400e7SDavid Hildenbrand 	if (!sclp.has_ibs)
284009a400e7SDavid Hildenbrand 		return;
28418ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
28428e236546SChristian Borntraeger 	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
28438ad35755SDavid Hildenbrand }
28448ad35755SDavid Hildenbrand 
28456852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
28466852d7b6SDavid Hildenbrand {
28478ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
28488ad35755SDavid Hildenbrand 
28498ad35755SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
28508ad35755SDavid Hildenbrand 		return;
28518ad35755SDavid Hildenbrand 
28526852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
28538ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
2854433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
28558ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
28568ad35755SDavid Hildenbrand 
28578ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
28588ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
28598ad35755SDavid Hildenbrand 			started_vcpus++;
28608ad35755SDavid Hildenbrand 	}
28618ad35755SDavid Hildenbrand 
28628ad35755SDavid Hildenbrand 	if (started_vcpus == 0) {
28638ad35755SDavid Hildenbrand 		/* we're the only active VCPU -> speed it up */
28648ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(vcpu);
28658ad35755SDavid Hildenbrand 	} else if (started_vcpus == 1) {
28668ad35755SDavid Hildenbrand 		/*
28678ad35755SDavid Hildenbrand 		 * As we are starting a second VCPU, we have to disable
28688ad35755SDavid Hildenbrand 		 * the IBS facility on all VCPUs to remove potentially
28698ad35755SDavid Hildenbrand 		 * outstanding ENABLE requests.
28708ad35755SDavid Hildenbrand 		 */
28718ad35755SDavid Hildenbrand 		__disable_ibs_on_all_vcpus(vcpu->kvm);
28728ad35755SDavid Hildenbrand 	}
28738ad35755SDavid Hildenbrand 
2874805de8f4SPeter Zijlstra 	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
28758ad35755SDavid Hildenbrand 	/*
28768ad35755SDavid Hildenbrand 	 * Another VCPU might have used IBS while we were offline.
28778ad35755SDavid Hildenbrand 	 * Let's play safe and flush the VCPU at startup.
28788ad35755SDavid Hildenbrand 	 */
2879d3d692c8SDavid Hildenbrand 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2880433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
28818ad35755SDavid Hildenbrand 	return;
28826852d7b6SDavid Hildenbrand }
28836852d7b6SDavid Hildenbrand 
28846852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
28856852d7b6SDavid Hildenbrand {
28868ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
28878ad35755SDavid Hildenbrand 	struct kvm_vcpu *started_vcpu = NULL;
28888ad35755SDavid Hildenbrand 
28898ad35755SDavid Hildenbrand 	if (is_vcpu_stopped(vcpu))
28908ad35755SDavid Hildenbrand 		return;
28918ad35755SDavid Hildenbrand 
28926852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
28938ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
2894433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
28958ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
28968ad35755SDavid Hildenbrand 
289732f5ff63SDavid Hildenbrand 	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
28986cddd432SDavid Hildenbrand 	kvm_s390_clear_stop_irq(vcpu);
289932f5ff63SDavid Hildenbrand 
2900805de8f4SPeter Zijlstra 	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
29018ad35755SDavid Hildenbrand 	__disable_ibs_on_vcpu(vcpu);
29028ad35755SDavid Hildenbrand 
29038ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
29048ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
29058ad35755SDavid Hildenbrand 			started_vcpus++;
29068ad35755SDavid Hildenbrand 			started_vcpu = vcpu->kvm->vcpus[i];
29078ad35755SDavid Hildenbrand 		}
29088ad35755SDavid Hildenbrand 	}
29098ad35755SDavid Hildenbrand 
29108ad35755SDavid Hildenbrand 	if (started_vcpus == 1) {
29118ad35755SDavid Hildenbrand 		/*
29128ad35755SDavid Hildenbrand 		 * As we only have one VCPU left, we want to enable the
29138ad35755SDavid Hildenbrand 		 * IBS facility for that VCPU to speed it up.
29148ad35755SDavid Hildenbrand 		 */
29158ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(started_vcpu);
29168ad35755SDavid Hildenbrand 	}
29178ad35755SDavid Hildenbrand 
2918433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
29198ad35755SDavid Hildenbrand 	return;
29206852d7b6SDavid Hildenbrand }
29216852d7b6SDavid Hildenbrand 
2922d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2923d6712df9SCornelia Huck 				     struct kvm_enable_cap *cap)
2924d6712df9SCornelia Huck {
2925d6712df9SCornelia Huck 	int r;
2926d6712df9SCornelia Huck 
2927d6712df9SCornelia Huck 	if (cap->flags)
2928d6712df9SCornelia Huck 		return -EINVAL;
2929d6712df9SCornelia Huck 
2930d6712df9SCornelia Huck 	switch (cap->cap) {
2931fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
2932fa6b7fe9SCornelia Huck 		if (!vcpu->kvm->arch.css_support) {
2933fa6b7fe9SCornelia Huck 			vcpu->kvm->arch.css_support = 1;
2934c92ea7b9SChristian Borntraeger 			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
2935fa6b7fe9SCornelia Huck 			trace_kvm_s390_enable_css(vcpu->kvm);
2936fa6b7fe9SCornelia Huck 		}
2937fa6b7fe9SCornelia Huck 		r = 0;
2938fa6b7fe9SCornelia Huck 		break;
2939d6712df9SCornelia Huck 	default:
2940d6712df9SCornelia Huck 		r = -EINVAL;
2941d6712df9SCornelia Huck 		break;
2942d6712df9SCornelia Huck 	}
2943d6712df9SCornelia Huck 	return r;
2944d6712df9SCornelia Huck }
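
/*
 * Illustrative userspace sketch, not part of this source file: enabling
 * the CSS support capability through the vCPU ioctl handled above.
 * "vcpu_fd" is an assumed, already-open vCPU file descriptor.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_enable_css_support(int vcpu_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));	/* flags must be 0, otherwise the ioctl fails */
	cap.cap = KVM_CAP_S390_CSS_SUPPORT;

	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}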
2945d6712df9SCornelia Huck 
294641408c28SThomas Huth static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
294741408c28SThomas Huth 				  struct kvm_s390_mem_op *mop)
294841408c28SThomas Huth {
294941408c28SThomas Huth 	void __user *uaddr = (void __user *)mop->buf;
295041408c28SThomas Huth 	void *tmpbuf = NULL;
295141408c28SThomas Huth 	int r, srcu_idx;
295241408c28SThomas Huth 	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
295341408c28SThomas Huth 				    | KVM_S390_MEMOP_F_CHECK_ONLY;
295441408c28SThomas Huth 
295541408c28SThomas Huth 	if (mop->flags & ~supported_flags)
295641408c28SThomas Huth 		return -EINVAL;
295741408c28SThomas Huth 
295841408c28SThomas Huth 	if (mop->size > MEM_OP_MAX_SIZE)
295941408c28SThomas Huth 		return -E2BIG;
296041408c28SThomas Huth 
296141408c28SThomas Huth 	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
296241408c28SThomas Huth 		tmpbuf = vmalloc(mop->size);
296341408c28SThomas Huth 		if (!tmpbuf)
296441408c28SThomas Huth 			return -ENOMEM;
296541408c28SThomas Huth 	}
296641408c28SThomas Huth 
296741408c28SThomas Huth 	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
296841408c28SThomas Huth 
296941408c28SThomas Huth 	switch (mop->op) {
297041408c28SThomas Huth 	case KVM_S390_MEMOP_LOGICAL_READ:
297141408c28SThomas Huth 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
297292c96321SDavid Hildenbrand 			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
297392c96321SDavid Hildenbrand 					    mop->size, GACC_FETCH);
297441408c28SThomas Huth 			break;
297541408c28SThomas Huth 		}
297641408c28SThomas Huth 		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
297741408c28SThomas Huth 		if (r == 0) {
297841408c28SThomas Huth 			if (copy_to_user(uaddr, tmpbuf, mop->size))
297941408c28SThomas Huth 				r = -EFAULT;
298041408c28SThomas Huth 		}
298141408c28SThomas Huth 		break;
298241408c28SThomas Huth 	case KVM_S390_MEMOP_LOGICAL_WRITE:
298341408c28SThomas Huth 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
298492c96321SDavid Hildenbrand 			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
298592c96321SDavid Hildenbrand 					    mop->size, GACC_STORE);
298641408c28SThomas Huth 			break;
298741408c28SThomas Huth 		}
298841408c28SThomas Huth 		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
298941408c28SThomas Huth 			r = -EFAULT;
299041408c28SThomas Huth 			break;
299141408c28SThomas Huth 		}
299241408c28SThomas Huth 		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
299341408c28SThomas Huth 		break;
299441408c28SThomas Huth 	default:
299541408c28SThomas Huth 		r = -EINVAL;
299641408c28SThomas Huth 	}
299741408c28SThomas Huth 
299841408c28SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
299941408c28SThomas Huth 
300041408c28SThomas Huth 	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
300141408c28SThomas Huth 		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
300241408c28SThomas Huth 
300341408c28SThomas Huth 	vfree(tmpbuf);
300441408c28SThomas Huth 	return r;
300541408c28SThomas Huth }
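
/*
 * Illustrative userspace sketch, not part of this source file: reading
 * guest memory through KVM_S390_MEM_OP as implemented above. "vcpu_fd"
 * is an assumed, already-open vCPU file descriptor; "gaddr" is a guest
 * logical address translated via access register 0.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_read_guest(int vcpu_fd, uint64_t gaddr, void *buf,
			      uint32_t len)
{
	struct kvm_s390_mem_op op;

	memset(&op, 0, sizeof(op));
	op.op    = KVM_S390_MEMOP_LOGICAL_READ;
	op.gaddr = gaddr;
	op.size  = len;		/* capped at MEM_OP_MAX_SIZE by the kernel */
	op.buf   = (uint64_t)(unsigned long)buf;
	op.ar    = 0;		/* access register used for translation */

	/* 0 on success, > 0 reports a guest program interruption code */
	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}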
300641408c28SThomas Huth 
3007b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp,
3008b0c632dbSHeiko Carstens 			 unsigned int ioctl, unsigned long arg)
3009b0c632dbSHeiko Carstens {
3010b0c632dbSHeiko Carstens 	struct kvm_vcpu *vcpu = filp->private_data;
3011b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
3012800c1065SThomas Huth 	int idx;
3013bc923cc9SAvi Kivity 	long r;
3014b0c632dbSHeiko Carstens 
301593736624SAvi Kivity 	switch (ioctl) {
301647b43c52SJens Freimann 	case KVM_S390_IRQ: {
301747b43c52SJens Freimann 		struct kvm_s390_irq s390irq;
301847b43c52SJens Freimann 
301947b43c52SJens Freimann 		r = -EFAULT;
302047b43c52SJens Freimann 		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
302147b43c52SJens Freimann 			break;
302247b43c52SJens Freimann 		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
302347b43c52SJens Freimann 		break;
302447b43c52SJens Freimann 	}
302593736624SAvi Kivity 	case KVM_S390_INTERRUPT: {
3026ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
3027383d0b05SJens Freimann 		struct kvm_s390_irq s390irq;
3028ba5c1e9bSCarsten Otte 
302993736624SAvi Kivity 		r = -EFAULT;
3030ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
303193736624SAvi Kivity 			break;
3032383d0b05SJens Freimann 		if (s390int_to_s390irq(&s390int, &s390irq))
3033383d0b05SJens Freimann 			return -EINVAL;
3034383d0b05SJens Freimann 		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
303593736624SAvi Kivity 		break;
3036ba5c1e9bSCarsten Otte 	}
3037b0c632dbSHeiko Carstens 	case KVM_S390_STORE_STATUS:
3038800c1065SThomas Huth 		idx = srcu_read_lock(&vcpu->kvm->srcu);
3039bc923cc9SAvi Kivity 		r = kvm_s390_vcpu_store_status(vcpu, arg);
3040800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
3041bc923cc9SAvi Kivity 		break;
3042b0c632dbSHeiko Carstens 	case KVM_S390_SET_INITIAL_PSW: {
3043b0c632dbSHeiko Carstens 		psw_t psw;
3044b0c632dbSHeiko Carstens 
3045bc923cc9SAvi Kivity 		r = -EFAULT;
3046b0c632dbSHeiko Carstens 		if (copy_from_user(&psw, argp, sizeof(psw)))
3047bc923cc9SAvi Kivity 			break;
3048bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
3049bc923cc9SAvi Kivity 		break;
3050b0c632dbSHeiko Carstens 	}
3051b0c632dbSHeiko Carstens 	case KVM_S390_INITIAL_RESET:
3052bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
3053bc923cc9SAvi Kivity 		break;
305414eebd91SCarsten Otte 	case KVM_SET_ONE_REG:
305514eebd91SCarsten Otte 	case KVM_GET_ONE_REG: {
305614eebd91SCarsten Otte 		struct kvm_one_reg reg;
305714eebd91SCarsten Otte 		r = -EFAULT;
305814eebd91SCarsten Otte 		if (copy_from_user(&reg, argp, sizeof(reg)))
305914eebd91SCarsten Otte 			break;
306014eebd91SCarsten Otte 		if (ioctl == KVM_SET_ONE_REG)
306114eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
306214eebd91SCarsten Otte 		else
306314eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
306414eebd91SCarsten Otte 		break;
306514eebd91SCarsten Otte 	}
306627e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
306727e0393fSCarsten Otte 	case KVM_S390_UCAS_MAP: {
306827e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
306927e0393fSCarsten Otte 
307027e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
307127e0393fSCarsten Otte 			r = -EFAULT;
307227e0393fSCarsten Otte 			break;
307327e0393fSCarsten Otte 		}
307427e0393fSCarsten Otte 
307527e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
307627e0393fSCarsten Otte 			r = -EINVAL;
307727e0393fSCarsten Otte 			break;
307827e0393fSCarsten Otte 		}
307927e0393fSCarsten Otte 
308027e0393fSCarsten Otte 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
308127e0393fSCarsten Otte 				     ucasmap.vcpu_addr, ucasmap.length);
308227e0393fSCarsten Otte 		break;
308327e0393fSCarsten Otte 	}
308427e0393fSCarsten Otte 	case KVM_S390_UCAS_UNMAP: {
308527e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
308627e0393fSCarsten Otte 
308727e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
308827e0393fSCarsten Otte 			r = -EFAULT;
308927e0393fSCarsten Otte 			break;
309027e0393fSCarsten Otte 		}
309127e0393fSCarsten Otte 
309227e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
309327e0393fSCarsten Otte 			r = -EINVAL;
309427e0393fSCarsten Otte 			break;
309527e0393fSCarsten Otte 		}
309627e0393fSCarsten Otte 
309727e0393fSCarsten Otte 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
309827e0393fSCarsten Otte 			ucasmap.length);
309927e0393fSCarsten Otte 		break;
310027e0393fSCarsten Otte 	}
310127e0393fSCarsten Otte #endif
3102ccc7910fSCarsten Otte 	case KVM_S390_VCPU_FAULT: {
3103527e30b4SMartin Schwidefsky 		r = gmap_fault(vcpu->arch.gmap, arg, 0);
3104ccc7910fSCarsten Otte 		break;
3105ccc7910fSCarsten Otte 	}
3106d6712df9SCornelia Huck 	case KVM_ENABLE_CAP:
3107d6712df9SCornelia Huck 	{
3108d6712df9SCornelia Huck 		struct kvm_enable_cap cap;
3109d6712df9SCornelia Huck 		r = -EFAULT;
3110d6712df9SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
3111d6712df9SCornelia Huck 			break;
3112d6712df9SCornelia Huck 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
3113d6712df9SCornelia Huck 		break;
3114d6712df9SCornelia Huck 	}
311541408c28SThomas Huth 	case KVM_S390_MEM_OP: {
311641408c28SThomas Huth 		struct kvm_s390_mem_op mem_op;
311741408c28SThomas Huth 
311841408c28SThomas Huth 		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
311941408c28SThomas Huth 			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
312041408c28SThomas Huth 		else
312141408c28SThomas Huth 			r = -EFAULT;
312241408c28SThomas Huth 		break;
312341408c28SThomas Huth 	}
3124816c7667SJens Freimann 	case KVM_S390_SET_IRQ_STATE: {
3125816c7667SJens Freimann 		struct kvm_s390_irq_state irq_state;
3126816c7667SJens Freimann 
3127816c7667SJens Freimann 		r = -EFAULT;
3128816c7667SJens Freimann 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3129816c7667SJens Freimann 			break;
3130816c7667SJens Freimann 		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
3131816c7667SJens Freimann 		    irq_state.len == 0 ||
3132816c7667SJens Freimann 		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
3133816c7667SJens Freimann 			r = -EINVAL;
3134816c7667SJens Freimann 			break;
3135816c7667SJens Freimann 		}
3136816c7667SJens Freimann 		r = kvm_s390_set_irq_state(vcpu,
3137816c7667SJens Freimann 					   (void __user *) irq_state.buf,
3138816c7667SJens Freimann 					   irq_state.len);
3139816c7667SJens Freimann 		break;
3140816c7667SJens Freimann 	}
3141816c7667SJens Freimann 	case KVM_S390_GET_IRQ_STATE: {
3142816c7667SJens Freimann 		struct kvm_s390_irq_state irq_state;
3143816c7667SJens Freimann 
3144816c7667SJens Freimann 		r = -EFAULT;
3145816c7667SJens Freimann 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3146816c7667SJens Freimann 			break;
3147816c7667SJens Freimann 		if (irq_state.len == 0) {
3148816c7667SJens Freimann 			r = -EINVAL;
3149816c7667SJens Freimann 			break;
3150816c7667SJens Freimann 		}
3151816c7667SJens Freimann 		r = kvm_s390_get_irq_state(vcpu,
3152816c7667SJens Freimann 					   (__u8 __user *)  irq_state.buf,
3153816c7667SJens Freimann 					   irq_state.len);
3154816c7667SJens Freimann 		break;
3155816c7667SJens Freimann 	}
3156b0c632dbSHeiko Carstens 	default:
31573e6afcf1SCarsten Otte 		r = -ENOTTY;
3158b0c632dbSHeiko Carstens 	}
3159bc923cc9SAvi Kivity 	return r;
3160b0c632dbSHeiko Carstens }
3161b0c632dbSHeiko Carstens 
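/*
 * mmap handler for the vcpu fd: in ucontrol mode the SIE control block is
 * exposed to userspace at KVM_S390_SIE_PAGE_OFFSET; all other accesses
 * result in SIGBUS.
 */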
31625b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
31635b1c1493SCarsten Otte {
31645b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
31655b1c1493SCarsten Otte 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
31665b1c1493SCarsten Otte 		 && (kvm_is_ucontrol(vcpu->kvm))) {
31675b1c1493SCarsten Otte 		vmf->page = virt_to_page(vcpu->arch.sie_block);
31685b1c1493SCarsten Otte 		get_page(vmf->page);
31695b1c1493SCarsten Otte 		return 0;
31705b1c1493SCarsten Otte 	}
31715b1c1493SCarsten Otte #endif
31725b1c1493SCarsten Otte 	return VM_FAULT_SIGBUS;
31735b1c1493SCarsten Otte }
31745b1c1493SCarsten Otte 
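/* No arch-specific per-memslot data is needed on s390. */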
31755587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
31765587027cSAneesh Kumar K.V 			    unsigned long npages)
3177db3fe4ebSTakuya Yoshikawa {
3178db3fe4ebSTakuya Yoshikawa 	return 0;
3179db3fe4ebSTakuya Yoshikawa }
3180db3fe4ebSTakuya Yoshikawa 
3181b0c632dbSHeiko Carstens /* Section: memory related */
3182f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
3183f7784b8eSMarcelo Tosatti 				   struct kvm_memory_slot *memslot,
318409170a49SPaolo Bonzini 				   const struct kvm_userspace_memory_region *mem,
31857b6195a9STakuya Yoshikawa 				   enum kvm_mr_change change)
3186b0c632dbSHeiko Carstens {
3187dd2887e7SNick Wang 	/* A few sanity checks. Memory slots have to start and end on a
3188dd2887e7SNick Wang 	 * segment boundary (1MB). The memory in userland may be fragmented
3189dd2887e7SNick Wang 	 * into several different vmas. It is fine to mmap() and munmap()
3190dd2887e7SNick Wang 	 * ranges in this slot at any time after this call. */
3191b0c632dbSHeiko Carstens 
3192598841caSCarsten Otte 	if (mem->userspace_addr & 0xffffful)
3193b0c632dbSHeiko Carstens 		return -EINVAL;
3194b0c632dbSHeiko Carstens 
3195598841caSCarsten Otte 	if (mem->memory_size & 0xffffful)
3196b0c632dbSHeiko Carstens 		return -EINVAL;
3197b0c632dbSHeiko Carstens 
3198a3a92c31SDominik Dingel 	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
3199a3a92c31SDominik Dingel 		return -EINVAL;
3200a3a92c31SDominik Dingel 
3201f7784b8eSMarcelo Tosatti 	return 0;
3202f7784b8eSMarcelo Tosatti }
3203f7784b8eSMarcelo Tosatti 
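/*
 * Map the new userspace range into the guest address space (gmap), unless
 * only attributes of an otherwise unchanged slot were modified.
 */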
3204f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
320509170a49SPaolo Bonzini 				const struct kvm_userspace_memory_region *mem,
32068482644aSTakuya Yoshikawa 				const struct kvm_memory_slot *old,
3207f36f3f28SPaolo Bonzini 				const struct kvm_memory_slot *new,
32088482644aSTakuya Yoshikawa 				enum kvm_mr_change change)
3209f7784b8eSMarcelo Tosatti {
3210f7850c92SCarsten Otte 	int rc;
3211f7784b8eSMarcelo Tosatti 
32122cef4debSChristian Borntraeger 	/* If the basics of the memslot do not change, we do not want
32132cef4debSChristian Borntraeger 	 * to update the gmap. Every update causes several unnecessary
32142cef4debSChristian Borntraeger 	 * segment translation exceptions. This is usually handled just
32152cef4debSChristian Borntraeger 	 * fine by the normal fault handler + gmap, but it will also
32162cef4debSChristian Borntraeger 	 * cause faults on the prefix page of running guest CPUs.
32172cef4debSChristian Borntraeger 	 */
32182cef4debSChristian Borntraeger 	if (old->userspace_addr == mem->userspace_addr &&
32192cef4debSChristian Borntraeger 	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
32202cef4debSChristian Borntraeger 	    old->npages * PAGE_SIZE == mem->memory_size)
32212cef4debSChristian Borntraeger 		return;
3222598841caSCarsten Otte 
3223598841caSCarsten Otte 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
3224598841caSCarsten Otte 		mem->guest_phys_addr, mem->memory_size);
3225598841caSCarsten Otte 	if (rc)
3226ea2cdd27SDavid Hildenbrand 		pr_warn("failed to commit memory region\n");
3227598841caSCarsten Otte 	return;
3228b0c632dbSHeiko Carstens }
3229b0c632dbSHeiko Carstens 
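/*
 * nonhyp_mask() - mask of the bits in STFLE word @i that are not reserved
 * for the hypervisor.  The i-th two-bit field of sclp.hmfai (counted from
 * the most significant end) selects how much of the word is kept: at most
 * the low 48 bits, and 16 bits fewer for every increment of the field.
 */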
323060a37709SAlexander Yarygin static inline unsigned long nonhyp_mask(int i)
323160a37709SAlexander Yarygin {
323260a37709SAlexander Yarygin 	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
323360a37709SAlexander Yarygin 
323460a37709SAlexander Yarygin 	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
323560a37709SAlexander Yarygin }
323660a37709SAlexander Yarygin 
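/* Called once the vcpu leaves the blocked state; any wakeup is now stale. */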
32373491caf2SChristian Borntraeger void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
32383491caf2SChristian Borntraeger {
32393491caf2SChristian Borntraeger 	vcpu->valid_wakeup = false;
32403491caf2SChristian Borntraeger }
32413491caf2SChristian Borntraeger 
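/*
 * Module init: refuse to load without SIE support, extend the facility
 * list mask with the host facilities not reserved for the hypervisor and
 * register with the common KVM code.
 */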
3242b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
3243b0c632dbSHeiko Carstens {
324460a37709SAlexander Yarygin 	int i;
324560a37709SAlexander Yarygin 
324607197fd0SDavid Hildenbrand 	if (!sclp.has_sief2) {
324707197fd0SDavid Hildenbrand 		pr_info("SIE not available\n");
324807197fd0SDavid Hildenbrand 		return -ENODEV;
324907197fd0SDavid Hildenbrand 	}
325007197fd0SDavid Hildenbrand 
325160a37709SAlexander Yarygin 	for (i = 0; i < 16; i++)
325260a37709SAlexander Yarygin 		kvm_s390_fac_list_mask[i] |=
325360a37709SAlexander Yarygin 			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
325460a37709SAlexander Yarygin 
32559d8d5786SMichael Mueller 	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
3256b0c632dbSHeiko Carstens }
3257b0c632dbSHeiko Carstens 
3258b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void)
3259b0c632dbSHeiko Carstens {
3260b0c632dbSHeiko Carstens 	kvm_exit();
3261b0c632dbSHeiko Carstens }
3262b0c632dbSHeiko Carstens 
3263b0c632dbSHeiko Carstens module_init(kvm_s390_init);
3264b0c632dbSHeiko Carstens module_exit(kvm_s390_exit);
3265566af940SCornelia Huck 
3266566af940SCornelia Huck /*
3267566af940SCornelia Huck  * Enable autoloading of the kvm module.
3268566af940SCornelia Huck  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
3269566af940SCornelia Huck  * since x86 takes a different approach.
3270566af940SCornelia Huck  */
3271566af940SCornelia Huck #include <linux/miscdevice.h>
3272566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR);
3273566af940SCornelia Huck MODULE_ALIAS("devname:kvm");
3274