xref: /openbmc/linux/arch/s390/kvm/kvm-s390.c (revision 81a1cf9f89a6b71e71bfd7d43837ce9235e70b38)
1d809aa23SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2b0c632dbSHeiko Carstens /*
3bb64da9aSChristian Borntraeger  * hosting IBM Z kernel virtual machines (s390x)
4b0c632dbSHeiko Carstens  *
53e6c5568SJanosch Frank  * Copyright IBM Corp. 2008, 2020
6b0c632dbSHeiko Carstens  *
7b0c632dbSHeiko Carstens  *    Author(s): Carsten Otte <cotte@de.ibm.com>
8b0c632dbSHeiko Carstens  *               Christian Borntraeger <borntraeger@de.ibm.com>
9628eb9b8SChristian Ehrhardt  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
1015f36ebdSJason J. Herne  *               Jason J. Herne <jjherne@us.ibm.com>
11b0c632dbSHeiko Carstens  */
12b0c632dbSHeiko Carstens 
137aedd9d4SMichael Mueller #define KMSG_COMPONENT "kvm-s390"
147aedd9d4SMichael Mueller #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
157aedd9d4SMichael Mueller 
16b0c632dbSHeiko Carstens #include <linux/compiler.h>
17b0c632dbSHeiko Carstens #include <linux/err.h>
18b0c632dbSHeiko Carstens #include <linux/fs.h>
19ca872302SChristian Borntraeger #include <linux/hrtimer.h>
20b0c632dbSHeiko Carstens #include <linux/init.h>
21b0c632dbSHeiko Carstens #include <linux/kvm.h>
22b0c632dbSHeiko Carstens #include <linux/kvm_host.h>
23b2d73b2aSMartin Schwidefsky #include <linux/mman.h>
24b0c632dbSHeiko Carstens #include <linux/module.h>
25d3217967SPaul Gortmaker #include <linux/moduleparam.h>
26a374e892STony Krowiak #include <linux/random.h>
27b0c632dbSHeiko Carstens #include <linux/slab.h>
28ba5c1e9bSCarsten Otte #include <linux/timer.h>
2941408c28SThomas Huth #include <linux/vmalloc.h>
3015c9705fSDavid Hildenbrand #include <linux/bitmap.h>
31174cd4b1SIngo Molnar #include <linux/sched/signal.h>
32190df4a2SClaudio Imbrenda #include <linux/string.h>
3365fddcfcSMike Rapoport #include <linux/pgtable.h>
34ca2fd060SClaudio Imbrenda #include <linux/mmu_notifier.h>
35174cd4b1SIngo Molnar 
36cbb870c8SHeiko Carstens #include <asm/asm-offsets.h>
37b0c632dbSHeiko Carstens #include <asm/lowcore.h>
38fd5ada04SMartin Schwidefsky #include <asm/stp.h>
391e133ab2SMartin Schwidefsky #include <asm/gmap.h>
40f5daba1dSHeiko Carstens #include <asm/nmi.h>
41a0616cdeSDavid Howells #include <asm/switch_to.h>
426d3da241SJens Freimann #include <asm/isc.h>
431526bf9cSChristian Borntraeger #include <asm/sclp.h>
440a763c78SDavid Hildenbrand #include <asm/cpacf.h>
45221bb8a4SLinus Torvalds #include <asm/timex.h>
46e585b24aSTony Krowiak #include <asm/ap.h>
4729b40f10SJanosch Frank #include <asm/uv.h>
4856e62a73SSven Schnelle #include <asm/fpu/api.h>
498f2abe6aSChristian Borntraeger #include "kvm-s390.h"
50b0c632dbSHeiko Carstens #include "gaccess.h"
5198b1d33dSMatthew Rosato #include "pci.h"
52b0c632dbSHeiko Carstens 
535786fffaSCornelia Huck #define CREATE_TRACE_POINTS
545786fffaSCornelia Huck #include "trace.h"
55ade38c31SCornelia Huck #include "trace-s390.h"
565786fffaSCornelia Huck 
5741408c28SThomas Huth #define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
58816c7667SJens Freimann #define LOCAL_IRQS 32
59816c7667SJens Freimann #define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
60816c7667SJens Freimann 			   (KVM_MAX_VCPUS + LOCAL_IRQS))
6141408c28SThomas Huth 
62fcfe1baeSJing Zhang const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
63fcfe1baeSJing Zhang 	KVM_GENERIC_VM_STATS(),
64fcfe1baeSJing Zhang 	STATS_DESC_COUNTER(VM, inject_io),
65fcfe1baeSJing Zhang 	STATS_DESC_COUNTER(VM, inject_float_mchk),
66fcfe1baeSJing Zhang 	STATS_DESC_COUNTER(VM, inject_pfault_done),
67fcfe1baeSJing Zhang 	STATS_DESC_COUNTER(VM, inject_service_signal),
6873f91b00SMatthew Rosato 	STATS_DESC_COUNTER(VM, inject_virtio),
6973f91b00SMatthew Rosato 	STATS_DESC_COUNTER(VM, aen_forward)
70fcfe1baeSJing Zhang };
71fcfe1baeSJing Zhang 
72fcfe1baeSJing Zhang const struct kvm_stats_header kvm_vm_stats_header = {
73fcfe1baeSJing Zhang 	.name_size = KVM_STATS_NAME_SIZE,
74fcfe1baeSJing Zhang 	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
75fcfe1baeSJing Zhang 	.id_offset = sizeof(struct kvm_stats_header),
76fcfe1baeSJing Zhang 	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
77fcfe1baeSJing Zhang 	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
78fcfe1baeSJing Zhang 		       sizeof(kvm_vm_stats_desc),
79fcfe1baeSJing Zhang };
80fcfe1baeSJing Zhang 
81ce55c049SJing Zhang const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
82ce55c049SJing Zhang 	KVM_GENERIC_VCPU_STATS(),
83ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, exit_userspace),
84ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, exit_null),
85ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, exit_external_request),
86ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, exit_io_request),
87ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
88ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, exit_stop_request),
89ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, exit_validity),
90ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, exit_instruction),
91ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, exit_pei),
92ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
93ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_lctl),
94ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_lctlg),
95ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_stctl),
96ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_stctg),
97ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, exit_program_interruption),
98ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
99ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, exit_operation_exception),
100ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, deliver_ckc),
101ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, deliver_cputm),
102ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, deliver_external_call),
103ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
104ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, deliver_service_signal),
105ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, deliver_virtio),
106ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
107ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
108ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
109ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, deliver_program),
110ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, deliver_io),
111ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, deliver_machine_check),
112ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, exit_wait_state),
113ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, inject_ckc),
114ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, inject_cputm),
115ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, inject_external_call),
116ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
117ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, inject_mchk),
118ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, inject_pfault_init),
119ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, inject_program),
120ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, inject_restart),
121ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, inject_set_prefix),
122ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, inject_stop_signal),
123ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_epsw),
124ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_gs),
125ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_io_other),
126ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
127ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
128ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
129ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_ptff),
130ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_sck),
131ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_sckpf),
132ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_stidp),
133ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_spx),
134ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_stpx),
135ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_stap),
136ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_iske),
137ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_ri),
138ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_rrbe),
139ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_sske),
140ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
141ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_stsi),
142ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_stfl),
143ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_tb),
144ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_tpi),
145ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_tprot),
146ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_tsch),
147ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_sie),
148ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_essa),
149ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_sthyi),
150ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
151ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
152ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
153ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
154ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
155ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
156ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
157ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
158ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
159ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
160ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
161ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
162ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
163ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
164ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
165ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
166bb000f64SChristian Borntraeger 	STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
167bb000f64SChristian Borntraeger 	STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
168bb000f64SChristian Borntraeger 	STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
169bb000f64SChristian Borntraeger 	STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
170bb000f64SChristian Borntraeger 	STATS_DESC_COUNTER(VCPU, diag_9c_forward),
171bb000f64SChristian Borntraeger 	STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
172bb000f64SChristian Borntraeger 	STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
173bb000f64SChristian Borntraeger 	STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
174bb000f64SChristian Borntraeger 	STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
175ce55c049SJing Zhang 	STATS_DESC_COUNTER(VCPU, pfault_sync)
176ce55c049SJing Zhang };
177ce55c049SJing Zhang 
178ce55c049SJing Zhang const struct kvm_stats_header kvm_vcpu_stats_header = {
179ce55c049SJing Zhang 	.name_size = KVM_STATS_NAME_SIZE,
180ce55c049SJing Zhang 	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
181ce55c049SJing Zhang 	.id_offset = sizeof(struct kvm_stats_header),
182ce55c049SJing Zhang 	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
183ce55c049SJing Zhang 	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
184ce55c049SJing Zhang 		       sizeof(kvm_vcpu_stats_desc),
185ce55c049SJing Zhang };
186ce55c049SJing Zhang 
187a411edf1SDavid Hildenbrand /* allow nested virtualization in KVM (if enabled by user space) */
188a411edf1SDavid Hildenbrand static int nested;
189a411edf1SDavid Hildenbrand module_param(nested, int, S_IRUGO);
190a411edf1SDavid Hildenbrand MODULE_PARM_DESC(nested, "Nested virtualization support");
191a411edf1SDavid Hildenbrand 
192a4499382SJanosch Frank /* allow 1m huge page guest backing, if !nested */
193a4499382SJanosch Frank static int hpage;
194a4499382SJanosch Frank module_param(hpage, int, 0444);
195a4499382SJanosch Frank MODULE_PARM_DESC(hpage, "1m huge page backing support");
196b0c632dbSHeiko Carstens 
1978b905d28SChristian Borntraeger /* maximum percentage of steal time for polling.  >100 is treated like 100 */
1988b905d28SChristian Borntraeger static u8 halt_poll_max_steal = 10;
1998b905d28SChristian Borntraeger module_param(halt_poll_max_steal, byte, 0644);
200b41fb528SWei Yongjun MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");
2018b905d28SChristian Borntraeger 
202cc674ef2SMichael Mueller /* if set to true, the GISA will be initialized and used if available */
203cc674ef2SMichael Mueller static bool use_gisa  = true;
204cc674ef2SMichael Mueller module_param(use_gisa, bool, 0644);
205cc674ef2SMichael Mueller MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");
206cc674ef2SMichael Mueller 
20787e28a15SPierre Morel /* maximum diag9c forwarding per second */
20887e28a15SPierre Morel unsigned int diag9c_forwarding_hz;
20987e28a15SPierre Morel module_param(diag9c_forwarding_hz, uint, 0644);
21087e28a15SPierre Morel MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");
21187e28a15SPierre Morel 
212c3b9e3e1SChristian Borntraeger /*
213cc726886SClaudio Imbrenda  * allow asynchronous deinit for protected guests; enable by default since
214cc726886SClaudio Imbrenda  * the feature is opt-in anyway
215cc726886SClaudio Imbrenda  */
216cc726886SClaudio Imbrenda static int async_destroy = 1;
217cc726886SClaudio Imbrenda module_param(async_destroy, int, 0444);
218cc726886SClaudio Imbrenda MODULE_PARM_DESC(async_destroy, "Asynchronous destroy for protected guests");
219fb491d55SClaudio Imbrenda 
220c3b9e3e1SChristian Borntraeger /*
221c3b9e3e1SChristian Borntraeger  * For now we handle at most 16 double words as this is what the s390 base
222c3b9e3e1SChristian Borntraeger  * kernel handles and stores in the prefix page. If we ever need to go beyond
223c3b9e3e1SChristian Borntraeger  * this, this requires changes to code, but the external uapi can stay.
224c3b9e3e1SChristian Borntraeger  */
225c3b9e3e1SChristian Borntraeger #define SIZE_INTERNAL 16
226c3b9e3e1SChristian Borntraeger 
227c3b9e3e1SChristian Borntraeger /*
228c3b9e3e1SChristian Borntraeger  * Base feature mask that defines default mask for facilities. Consists of the
229c3b9e3e1SChristian Borntraeger  * defines in FACILITIES_KVM and the non-hypervisor managed bits.
230c3b9e3e1SChristian Borntraeger  */
231c3b9e3e1SChristian Borntraeger static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
232c3b9e3e1SChristian Borntraeger /*
233c3b9e3e1SChristian Borntraeger  * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
234c3b9e3e1SChristian Borntraeger  * and defines the facilities that can be enabled via a cpu model.
235c3b9e3e1SChristian Borntraeger  */
236c3b9e3e1SChristian Borntraeger static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };
237c3b9e3e1SChristian Borntraeger 
238c3b9e3e1SChristian Borntraeger static unsigned long kvm_s390_fac_size(void)
23978c4b59fSMichael Mueller {
240c3b9e3e1SChristian Borntraeger 	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
241c3b9e3e1SChristian Borntraeger 	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
242c3b9e3e1SChristian Borntraeger 	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
24317e89e13SSven Schnelle 		sizeof(stfle_fac_list));
244c3b9e3e1SChristian Borntraeger 
245c3b9e3e1SChristian Borntraeger 	return SIZE_INTERNAL;
24678c4b59fSMichael Mueller }
24778c4b59fSMichael Mueller 
24815c9705fSDavid Hildenbrand /* available cpu features supported by kvm */
24915c9705fSDavid Hildenbrand static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
2500a763c78SDavid Hildenbrand /* available subfunctions indicated via query / "test bit" */
2510a763c78SDavid Hildenbrand static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;
25215c9705fSDavid Hildenbrand 
2539d8d5786SMichael Mueller static struct gmap_notifier gmap_notifier;
254a3508fbeSDavid Hildenbrand static struct gmap_notifier vsie_gmap_notifier;
25578f26131SChristian Borntraeger debug_info_t *kvm_s390_dbf;
2563e6c5568SJanosch Frank debug_info_t *kvm_s390_dbf_uv;
2579d8d5786SMichael Mueller 
258b0c632dbSHeiko Carstens /* Section: not file related */
25913a34e06SRadim Krčmář int kvm_arch_hardware_enable(void)
260b0c632dbSHeiko Carstens {
261b0c632dbSHeiko Carstens 	/* every s390 is virtualization enabled ;-) */
26210474ae8SAlexander Graf 	return 0;
263b0c632dbSHeiko Carstens }
264b0c632dbSHeiko Carstens 
26529b40f10SJanosch Frank /* forward declarations */
266414d3b07SMartin Schwidefsky static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
267414d3b07SMartin Schwidefsky 			      unsigned long end);
26829b40f10SJanosch Frank static int sca_switch_to_extended(struct kvm *kvm);
2692c70fe44SChristian Borntraeger 
2701575767eSDavid Hildenbrand static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
2711575767eSDavid Hildenbrand {
2721575767eSDavid Hildenbrand 	u8 delta_idx = 0;
2731575767eSDavid Hildenbrand 
2741575767eSDavid Hildenbrand 	/*
2751575767eSDavid Hildenbrand 	 * The TOD jumps by delta, we have to compensate this by adding
2761575767eSDavid Hildenbrand 	 * -delta to the epoch.
2771575767eSDavid Hildenbrand 	 */
2781575767eSDavid Hildenbrand 	delta = -delta;
2791575767eSDavid Hildenbrand 
2801575767eSDavid Hildenbrand 	/* sign-extension - we're adding to signed values below */
2811575767eSDavid Hildenbrand 	if ((s64)delta < 0)
2821575767eSDavid Hildenbrand 		delta_idx = -1;
2831575767eSDavid Hildenbrand 
2841575767eSDavid Hildenbrand 	scb->epoch += delta;
2851575767eSDavid Hildenbrand 	if (scb->ecd & ECD_MEF) {
2861575767eSDavid Hildenbrand 		scb->epdx += delta_idx;
2871575767eSDavid Hildenbrand 		if (scb->epoch < delta)
2881575767eSDavid Hildenbrand 			scb->epdx += 1;
2891575767eSDavid Hildenbrand 	}
2901575767eSDavid Hildenbrand }
2911575767eSDavid Hildenbrand 
292fdf03650SFan Zhang /*
293fdf03650SFan Zhang  * This callback is executed during stop_machine(). All CPUs are therefore
294fdf03650SFan Zhang  * temporarily stopped. In order not to change guest behavior, we have to
295fdf03650SFan Zhang  * disable preemption whenever we touch the epoch of kvm and the VCPUs,
296fdf03650SFan Zhang  * so a CPU won't be stopped while calculating with the epoch.
297fdf03650SFan Zhang  */
298fdf03650SFan Zhang static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
299fdf03650SFan Zhang 			  void *v)
300fdf03650SFan Zhang {
301fdf03650SFan Zhang 	struct kvm *kvm;
302fdf03650SFan Zhang 	struct kvm_vcpu *vcpu;
30346808a4cSMarc Zyngier 	unsigned long i;
304fdf03650SFan Zhang 	unsigned long long *delta = v;
305fdf03650SFan Zhang 
306fdf03650SFan Zhang 	list_for_each_entry(kvm, &vm_list, vm_list) {
307fdf03650SFan Zhang 		kvm_for_each_vcpu(i, vcpu, kvm) {
3081575767eSDavid Hildenbrand 			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
3091575767eSDavid Hildenbrand 			if (i == 0) {
3101575767eSDavid Hildenbrand 				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
3111575767eSDavid Hildenbrand 				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
3121575767eSDavid Hildenbrand 			}
313db0758b2SDavid Hildenbrand 			if (vcpu->arch.cputm_enabled)
314db0758b2SDavid Hildenbrand 				vcpu->arch.cputm_start += *delta;
31591473b48SDavid Hildenbrand 			if (vcpu->arch.vsie_block)
3161575767eSDavid Hildenbrand 				kvm_clock_sync_scb(vcpu->arch.vsie_block,
3171575767eSDavid Hildenbrand 						   *delta);
318fdf03650SFan Zhang 		}
319fdf03650SFan Zhang 	}
320fdf03650SFan Zhang 	return NOTIFY_OK;
321fdf03650SFan Zhang }
322fdf03650SFan Zhang 
323fdf03650SFan Zhang static struct notifier_block kvm_clock_notifier = {
324fdf03650SFan Zhang 	.notifier_call = kvm_clock_sync,
325fdf03650SFan Zhang };
326fdf03650SFan Zhang 
32722be5a13SDavid Hildenbrand static void allow_cpu_feat(unsigned long nr)
32822be5a13SDavid Hildenbrand {
32922be5a13SDavid Hildenbrand 	set_bit_inv(nr, kvm_s390_available_cpu_feat);
33022be5a13SDavid Hildenbrand }
33122be5a13SDavid Hildenbrand 
3320a763c78SDavid Hildenbrand static inline int plo_test_bit(unsigned char nr)
3330a763c78SDavid Hildenbrand {
3344fa3b91bSHeiko Carstens 	unsigned long function = (unsigned long)nr | 0x100;
335d051ae53SHeiko Carstens 	int cc;
3360a763c78SDavid Hildenbrand 
3370a763c78SDavid Hildenbrand 	asm volatile(
3384fa3b91bSHeiko Carstens 		"	lgr	0,%[function]\n"
3390a763c78SDavid Hildenbrand 		/* Parameter registers are ignored for "test bit" */
3400a763c78SDavid Hildenbrand 		"	plo	0,0,0,0(0)\n"
3410a763c78SDavid Hildenbrand 		"	ipm	%0\n"
3420a763c78SDavid Hildenbrand 		"	srl	%0,28\n"
3430a763c78SDavid Hildenbrand 		: "=d" (cc)
3444fa3b91bSHeiko Carstens 		: [function] "d" (function)
3454fa3b91bSHeiko Carstens 		: "cc", "0");
3460a763c78SDavid Hildenbrand 	return cc == 0;
3470a763c78SDavid Hildenbrand }
3480a763c78SDavid Hildenbrand 
349d0dea733SHeiko Carstens static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
350d6681397SChristian Borntraeger {
351d6681397SChristian Borntraeger 	asm volatile(
3524fa3b91bSHeiko Carstens 		"	lghi	0,0\n"
3534fa3b91bSHeiko Carstens 		"	lgr	1,%[query]\n"
3544fa3b91bSHeiko Carstens 		/* Parameter registers are ignored */
355d6681397SChristian Borntraeger 		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
356b1c41ac3SHeiko Carstens 		:
3574fa3b91bSHeiko Carstens 		: [query] "d" ((unsigned long)query), [opc] "i" (opcode)
3584fa3b91bSHeiko Carstens 		: "cc", "memory", "0", "1");
359d6681397SChristian Borntraeger }
360d6681397SChristian Borntraeger 
361173aec2dSChristian Borntraeger #define INSN_SORTL 0xb938
3624f45b90eSChristian Borntraeger #define INSN_DFLTCC 0xb939
363173aec2dSChristian Borntraeger 
3646c30cd2eSSean Christopherson static void __init kvm_s390_cpu_feat_init(void)
36522be5a13SDavid Hildenbrand {
3660a763c78SDavid Hildenbrand 	int i;
3670a763c78SDavid Hildenbrand 
3680a763c78SDavid Hildenbrand 	for (i = 0; i < 256; ++i) {
3690a763c78SDavid Hildenbrand 		if (plo_test_bit(i))
3700a763c78SDavid Hildenbrand 			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
3710a763c78SDavid Hildenbrand 	}
3720a763c78SDavid Hildenbrand 
3730a763c78SDavid Hildenbrand 	if (test_facility(28)) /* TOD-clock steering */
374221bb8a4SLinus Torvalds 		ptff(kvm_s390_available_subfunc.ptff,
375221bb8a4SLinus Torvalds 		     sizeof(kvm_s390_available_subfunc.ptff),
376221bb8a4SLinus Torvalds 		     PTFF_QAF);
3770a763c78SDavid Hildenbrand 
3780a763c78SDavid Hildenbrand 	if (test_facility(17)) { /* MSA */
37969c0e360SMartin Schwidefsky 		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
38069c0e360SMartin Schwidefsky 			      kvm_s390_available_subfunc.kmac);
38169c0e360SMartin Schwidefsky 		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
38269c0e360SMartin Schwidefsky 			      kvm_s390_available_subfunc.kmc);
38369c0e360SMartin Schwidefsky 		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
38469c0e360SMartin Schwidefsky 			      kvm_s390_available_subfunc.km);
38569c0e360SMartin Schwidefsky 		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
38669c0e360SMartin Schwidefsky 			      kvm_s390_available_subfunc.kimd);
38769c0e360SMartin Schwidefsky 		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
38869c0e360SMartin Schwidefsky 			      kvm_s390_available_subfunc.klmd);
3890a763c78SDavid Hildenbrand 	}
3900a763c78SDavid Hildenbrand 	if (test_facility(76)) /* MSA3 */
39169c0e360SMartin Schwidefsky 		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
39269c0e360SMartin Schwidefsky 			      kvm_s390_available_subfunc.pckmo);
3930a763c78SDavid Hildenbrand 	if (test_facility(77)) { /* MSA4 */
39469c0e360SMartin Schwidefsky 		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
39569c0e360SMartin Schwidefsky 			      kvm_s390_available_subfunc.kmctr);
39669c0e360SMartin Schwidefsky 		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
39769c0e360SMartin Schwidefsky 			      kvm_s390_available_subfunc.kmf);
39869c0e360SMartin Schwidefsky 		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
39969c0e360SMartin Schwidefsky 			      kvm_s390_available_subfunc.kmo);
40069c0e360SMartin Schwidefsky 		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
40169c0e360SMartin Schwidefsky 			      kvm_s390_available_subfunc.pcc);
4020a763c78SDavid Hildenbrand 	}
4030a763c78SDavid Hildenbrand 	if (test_facility(57)) /* MSA5 */
404985a9d20SHarald Freudenberger 		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
40569c0e360SMartin Schwidefsky 			      kvm_s390_available_subfunc.ppno);
4060a763c78SDavid Hildenbrand 
407e000b8e0SJason J. Herne 	if (test_facility(146)) /* MSA8 */
408e000b8e0SJason J. Herne 		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
409e000b8e0SJason J. Herne 			      kvm_s390_available_subfunc.kma);
410e000b8e0SJason J. Herne 
41113209ad0SChristian Borntraeger 	if (test_facility(155)) /* MSA9 */
41213209ad0SChristian Borntraeger 		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
41313209ad0SChristian Borntraeger 			      kvm_s390_available_subfunc.kdsa);
41413209ad0SChristian Borntraeger 
415173aec2dSChristian Borntraeger 	if (test_facility(150)) /* SORTL */
416173aec2dSChristian Borntraeger 		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);
417173aec2dSChristian Borntraeger 
4184f45b90eSChristian Borntraeger 	if (test_facility(151)) /* DFLTCC */
4194f45b90eSChristian Borntraeger 		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);
4204f45b90eSChristian Borntraeger 
42122be5a13SDavid Hildenbrand 	if (MACHINE_HAS_ESOP)
42222be5a13SDavid Hildenbrand 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
423a3508fbeSDavid Hildenbrand 	/*
424a3508fbeSDavid Hildenbrand 	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
425a3508fbeSDavid Hildenbrand 	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
426a3508fbeSDavid Hildenbrand 	 */
427a3508fbeSDavid Hildenbrand 	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
428a411edf1SDavid Hildenbrand 	    !test_facility(3) || !nested)
429a3508fbeSDavid Hildenbrand 		return;
430a3508fbeSDavid Hildenbrand 	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
43119c439b5SDavid Hildenbrand 	if (sclp.has_64bscao)
43219c439b5SDavid Hildenbrand 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
4330615a326SDavid Hildenbrand 	if (sclp.has_siif)
4340615a326SDavid Hildenbrand 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
43577d18f6dSDavid Hildenbrand 	if (sclp.has_gpere)
43677d18f6dSDavid Hildenbrand 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
437a1b7b9b2SDavid Hildenbrand 	if (sclp.has_gsls)
438a1b7b9b2SDavid Hildenbrand 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
4395630a8e8SDavid Hildenbrand 	if (sclp.has_ib)
4405630a8e8SDavid Hildenbrand 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
44113ee3f67SDavid Hildenbrand 	if (sclp.has_cei)
44213ee3f67SDavid Hildenbrand 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
4437fd7f39dSDavid Hildenbrand 	if (sclp.has_ibs)
4447fd7f39dSDavid Hildenbrand 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
445730cd632SFarhan Ali 	if (sclp.has_kss)
446730cd632SFarhan Ali 		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
4475d3876a8SDavid Hildenbrand 	/*
4485d3876a8SDavid Hildenbrand 	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
4495d3876a8SDavid Hildenbrand 	 * all skey handling functions read/set the skey from the PGSTE
4505d3876a8SDavid Hildenbrand 	 * instead of the real storage key.
4515d3876a8SDavid Hildenbrand 	 *
4525d3876a8SDavid Hildenbrand 	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
4535d3876a8SDavid Hildenbrand 	 * pages being detected as preserved although they are resident.
4545d3876a8SDavid Hildenbrand 	 *
4555d3876a8SDavid Hildenbrand 	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
4565d3876a8SDavid Hildenbrand 	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
4575d3876a8SDavid Hildenbrand 	 *
4585d3876a8SDavid Hildenbrand 	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
4595d3876a8SDavid Hildenbrand 	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
4605d3876a8SDavid Hildenbrand 	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
4615d3876a8SDavid Hildenbrand 	 *
4625d3876a8SDavid Hildenbrand 	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
4635d3876a8SDavid Hildenbrand 	 * cannot easily shadow the SCA because of the ipte lock.
4645d3876a8SDavid Hildenbrand 	 */
46522be5a13SDavid Hildenbrand }
46622be5a13SDavid Hildenbrand 
4676c30cd2eSSean Christopherson static int __init __kvm_s390_init(void)
468b0c632dbSHeiko Carstens {
469f76f6371SJanosch Frank 	int rc = -ENOMEM;
470308c3e66SMichael Mueller 
47178f26131SChristian Borntraeger 	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
47278f26131SChristian Borntraeger 	if (!kvm_s390_dbf)
47378f26131SChristian Borntraeger 		return -ENOMEM;
47478f26131SChristian Borntraeger 
4753e6c5568SJanosch Frank 	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
4763e6c5568SJanosch Frank 	if (!kvm_s390_dbf_uv)
477b801ef42SSean Christopherson 		goto err_kvm_uv;
4783e6c5568SJanosch Frank 
4793e6c5568SJanosch Frank 	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
4803e6c5568SJanosch Frank 	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
481b801ef42SSean Christopherson 		goto err_debug_view;
48278f26131SChristian Borntraeger 
48322be5a13SDavid Hildenbrand 	kvm_s390_cpu_feat_init();
48422be5a13SDavid Hildenbrand 
48584877d93SCornelia Huck 	/* Register floating interrupt controller interface. */
486308c3e66SMichael Mueller 	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
487308c3e66SMichael Mueller 	if (rc) {
4888d43d570SMichael Mueller 		pr_err("A FLIC registration call failed with rc=%d\n", rc);
489b801ef42SSean Christopherson 		goto err_flic;
490308c3e66SMichael Mueller 	}
491b1d1e76eSMichael Mueller 
492189e7d87SMatthew Rosato 	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
49398b1d33dSMatthew Rosato 		rc = kvm_s390_pci_init();
49498b1d33dSMatthew Rosato 		if (rc) {
49598b1d33dSMatthew Rosato 			pr_err("Unable to allocate AIFT for PCI\n");
496b801ef42SSean Christopherson 			goto err_pci;
49798b1d33dSMatthew Rosato 		}
49898b1d33dSMatthew Rosato 	}
49998b1d33dSMatthew Rosato 
500b1d1e76eSMichael Mueller 	rc = kvm_s390_gib_init(GAL_ISC);
501b1d1e76eSMichael Mueller 	if (rc)
502b801ef42SSean Christopherson 		goto err_gib;
503b1d1e76eSMichael Mueller 
504e43f5762SSean Christopherson 	gmap_notifier.notifier_call = kvm_gmap_notifier;
505e43f5762SSean Christopherson 	gmap_register_pte_notifier(&gmap_notifier);
506e43f5762SSean Christopherson 	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
507e43f5762SSean Christopherson 	gmap_register_pte_notifier(&vsie_gmap_notifier);
508e43f5762SSean Christopherson 	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
509e43f5762SSean Christopherson 				       &kvm_clock_notifier);
510e43f5762SSean Christopherson 
511308c3e66SMichael Mueller 	return 0;
512308c3e66SMichael Mueller 
513b801ef42SSean Christopherson err_gib:
514b801ef42SSean Christopherson 	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
515b801ef42SSean Christopherson 		kvm_s390_pci_exit();
516b801ef42SSean Christopherson err_pci:
517b801ef42SSean Christopherson err_flic:
518b801ef42SSean Christopherson err_debug_view:
519b801ef42SSean Christopherson 	debug_unregister(kvm_s390_dbf_uv);
520b801ef42SSean Christopherson err_kvm_uv:
521b801ef42SSean Christopherson 	debug_unregister(kvm_s390_dbf);
522308c3e66SMichael Mueller 	return rc;
523b0c632dbSHeiko Carstens }
524b0c632dbSHeiko Carstens 
525b8449265SSean Christopherson static void __kvm_s390_exit(void)
52678f26131SChristian Borntraeger {
527e43f5762SSean Christopherson 	gmap_unregister_pte_notifier(&gmap_notifier);
528e43f5762SSean Christopherson 	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
529e43f5762SSean Christopherson 	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
530e43f5762SSean Christopherson 					 &kvm_clock_notifier);
531e43f5762SSean Christopherson 
5321282c21eSMichael Mueller 	kvm_s390_gib_destroy();
533189e7d87SMatthew Rosato 	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
53498b1d33dSMatthew Rosato 		kvm_s390_pci_exit();
53578f26131SChristian Borntraeger 	debug_unregister(kvm_s390_dbf);
5363e6c5568SJanosch Frank 	debug_unregister(kvm_s390_dbf_uv);
53778f26131SChristian Borntraeger }
53878f26131SChristian Borntraeger 
539b0c632dbSHeiko Carstens /* Section: device related */
540b0c632dbSHeiko Carstens long kvm_arch_dev_ioctl(struct file *filp,
541b0c632dbSHeiko Carstens 			unsigned int ioctl, unsigned long arg)
542b0c632dbSHeiko Carstens {
543b0c632dbSHeiko Carstens 	if (ioctl == KVM_S390_ENABLE_SIE)
544b0c632dbSHeiko Carstens 		return s390_enable_sie();
545b0c632dbSHeiko Carstens 	return -EINVAL;
546b0c632dbSHeiko Carstens }
547b0c632dbSHeiko Carstens 
548784aa3d7SAlexander Graf int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
549b0c632dbSHeiko Carstens {
550d7b0b5ebSCarsten Otte 	int r;
551d7b0b5ebSCarsten Otte 
5522bd0ac4eSCarsten Otte 	switch (ext) {
553d7b0b5ebSCarsten Otte 	case KVM_CAP_S390_PSW:
554b6cf8788SChristian Borntraeger 	case KVM_CAP_S390_GMAP:
55552e16b18SChristian Borntraeger 	case KVM_CAP_SYNC_MMU:
5561efd0f59SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
5571efd0f59SCarsten Otte 	case KVM_CAP_S390_UCONTROL:
5581efd0f59SCarsten Otte #endif
5593c038e6bSDominik Dingel 	case KVM_CAP_ASYNC_PF:
56060b413c9SChristian Borntraeger 	case KVM_CAP_SYNC_REGS:
56114eebd91SCarsten Otte 	case KVM_CAP_ONE_REG:
562d6712df9SCornelia Huck 	case KVM_CAP_ENABLE_CAP:
563fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
56410ccaa1eSCornelia Huck 	case KVM_CAP_IOEVENTFD:
565c05c4186SJens Freimann 	case KVM_CAP_DEVICE_CTRL:
56678599d90SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
567f2061656SDominik Dingel 	case KVM_CAP_VM_ATTRIBUTES:
5686352e4d2SDavid Hildenbrand 	case KVM_CAP_MP_STATE:
569460df4c1SPaolo Bonzini 	case KVM_CAP_IMMEDIATE_EXIT:
57047b43c52SJens Freimann 	case KVM_CAP_S390_INJECT_IRQ:
5712444b352SDavid Hildenbrand 	case KVM_CAP_S390_USER_SIGP:
572e44fc8c9SEkaterina Tumanova 	case KVM_CAP_S390_USER_STSI:
57330ee2a98SJason J. Herne 	case KVM_CAP_S390_SKEYS:
574816c7667SJens Freimann 	case KVM_CAP_S390_IRQ_STATE:
5756502a34cSDavid Hildenbrand 	case KVM_CAP_S390_USER_INSTR0:
5764036e387SClaudio Imbrenda 	case KVM_CAP_S390_CMMA_MIGRATION:
57747a4693eSYi Min Zhao 	case KVM_CAP_S390_AIS:
578da9a1446SChristian Borntraeger 	case KVM_CAP_S390_AIS_MIGRATION:
5797de3f142SJanosch Frank 	case KVM_CAP_S390_VCPU_RESETS:
580b9b2782cSPeter Xu 	case KVM_CAP_SET_GUEST_DEBUG:
58123a60f83SCollin Walling 	case KVM_CAP_S390_DIAG318:
582d004079eSJanis Schoetterl-Glausch 	case KVM_CAP_S390_MEM_OP_EXTENSION:
583d7b0b5ebSCarsten Otte 		r = 1;
584d7b0b5ebSCarsten Otte 		break;
585a43b80b7SMaxim Levitsky 	case KVM_CAP_SET_GUEST_DEBUG2:
586a43b80b7SMaxim Levitsky 		r = KVM_GUESTDBG_VALID_MASK;
587a43b80b7SMaxim Levitsky 		break;
588a4499382SJanosch Frank 	case KVM_CAP_S390_HPAGE_1M:
589a4499382SJanosch Frank 		r = 0;
59040ebdb8eSJanosch Frank 		if (hpage && !kvm_is_ucontrol(kvm))
591a4499382SJanosch Frank 			r = 1;
592a4499382SJanosch Frank 		break;
59341408c28SThomas Huth 	case KVM_CAP_S390_MEM_OP:
59441408c28SThomas Huth 		r = MEM_OP_MAX_SIZE;
59541408c28SThomas Huth 		break;
596e726b1bdSChristian Borntraeger 	case KVM_CAP_NR_VCPUS:
597e726b1bdSChristian Borntraeger 	case KVM_CAP_MAX_VCPUS:
598a86cb413SThomas Huth 	case KVM_CAP_MAX_VCPU_ID:
59976a6dd72SDavid Hildenbrand 		r = KVM_S390_BSCA_CPU_SLOTS;
600a6940674SDavid Hildenbrand 		if (!kvm_s390_use_sca_entries())
601a6940674SDavid Hildenbrand 			r = KVM_MAX_VCPUS;
602a6940674SDavid Hildenbrand 		else if (sclp.has_esca && sclp.has_64bscao)
60376a6dd72SDavid Hildenbrand 			r = KVM_S390_ESCA_CPU_SLOTS;
60482cc27efSVitaly Kuznetsov 		if (ext == KVM_CAP_NR_VCPUS)
60582cc27efSVitaly Kuznetsov 			r = min_t(unsigned int, num_online_cpus(), r);
606e726b1bdSChristian Borntraeger 		break;
6071526bf9cSChristian Borntraeger 	case KVM_CAP_S390_COW:
608abf09bedSMartin Schwidefsky 		r = MACHINE_HAS_ESOP;
6091526bf9cSChristian Borntraeger 		break;
61068c55750SEric Farman 	case KVM_CAP_S390_VECTOR_REGISTERS:
61168c55750SEric Farman 		r = MACHINE_HAS_VX;
61268c55750SEric Farman 		break;
613c6e5f166SFan Zhang 	case KVM_CAP_S390_RI:
614c6e5f166SFan Zhang 		r = test_facility(64);
615c6e5f166SFan Zhang 		break;
6164e0b1ab7SFan Zhang 	case KVM_CAP_S390_GS:
6174e0b1ab7SFan Zhang 		r = test_facility(133);
6184e0b1ab7SFan Zhang 		break;
61935b3fde6SChristian Borntraeger 	case KVM_CAP_S390_BPB:
62035b3fde6SChristian Borntraeger 		r = test_facility(82);
62135b3fde6SChristian Borntraeger 		break;
6228c516b25SClaudio Imbrenda 	case KVM_CAP_S390_PROTECTED_ASYNC_DISABLE:
6238c516b25SClaudio Imbrenda 		r = async_destroy && is_prot_virt_host();
6248c516b25SClaudio Imbrenda 		break;
62513da9ae1SChristian Borntraeger 	case KVM_CAP_S390_PROTECTED:
62613da9ae1SChristian Borntraeger 		r = is_prot_virt_host();
62713da9ae1SChristian Borntraeger 		break;
628e9bf3acbSJanosch Frank 	case KVM_CAP_S390_PROTECTED_DUMP: {
629e9bf3acbSJanosch Frank 		u64 pv_cmds_dump[] = {
630e9bf3acbSJanosch Frank 			BIT_UVC_CMD_DUMP_INIT,
631e9bf3acbSJanosch Frank 			BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE,
632e9bf3acbSJanosch Frank 			BIT_UVC_CMD_DUMP_CPU,
633e9bf3acbSJanosch Frank 			BIT_UVC_CMD_DUMP_COMPLETE,
634e9bf3acbSJanosch Frank 		};
635e9bf3acbSJanosch Frank 		int i;
636e9bf3acbSJanosch Frank 
637e9bf3acbSJanosch Frank 		r = is_prot_virt_host();
638e9bf3acbSJanosch Frank 
639e9bf3acbSJanosch Frank 		for (i = 0; i < ARRAY_SIZE(pv_cmds_dump); i++) {
640e9bf3acbSJanosch Frank 			if (!test_bit_inv(pv_cmds_dump[i],
641e9bf3acbSJanosch Frank 					  (unsigned long *)&uv_info.inst_calls_list)) {
642e9bf3acbSJanosch Frank 				r = 0;
643e9bf3acbSJanosch Frank 				break;
644e9bf3acbSJanosch Frank 			}
645e9bf3acbSJanosch Frank 		}
646e9bf3acbSJanosch Frank 		break;
647e9bf3acbSJanosch Frank 	}
648db1c875eSMatthew Rosato 	case KVM_CAP_S390_ZPCI_OP:
649db1c875eSMatthew Rosato 		r = kvm_s390_pci_interp_allowed();
650db1c875eSMatthew Rosato 		break;
651f5ecfee9SPierre Morel 	case KVM_CAP_S390_CPU_TOPOLOGY:
652f5ecfee9SPierre Morel 		r = test_facility(11);
653f5ecfee9SPierre Morel 		break;
6542bd0ac4eSCarsten Otte 	default:
655d7b0b5ebSCarsten Otte 		r = 0;
656b0c632dbSHeiko Carstens 	}
657d7b0b5ebSCarsten Otte 	return r;
6582bd0ac4eSCarsten Otte }
659b0c632dbSHeiko Carstens 
6600dff0846SSean Christopherson void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
66115f36ebdSJason J. Herne {
6620959e168SJanosch Frank 	int i;
66315f36ebdSJason J. Herne 	gfn_t cur_gfn, last_gfn;
6640959e168SJanosch Frank 	unsigned long gaddr, vmaddr;
66515f36ebdSJason J. Herne 	struct gmap *gmap = kvm->arch.gmap;
6660959e168SJanosch Frank 	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);
66715f36ebdSJason J. Herne 
6680959e168SJanosch Frank 	/* Loop over all guest segments */
6690959e168SJanosch Frank 	cur_gfn = memslot->base_gfn;
67015f36ebdSJason J. Herne 	last_gfn = memslot->base_gfn + memslot->npages;
6710959e168SJanosch Frank 	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
6720959e168SJanosch Frank 		gaddr = gfn_to_gpa(cur_gfn);
6730959e168SJanosch Frank 		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
6740959e168SJanosch Frank 		if (kvm_is_error_hva(vmaddr))
6750959e168SJanosch Frank 			continue;
67615f36ebdSJason J. Herne 
6770959e168SJanosch Frank 		bitmap_zero(bitmap, _PAGE_ENTRIES);
6780959e168SJanosch Frank 		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
6790959e168SJanosch Frank 		for (i = 0; i < _PAGE_ENTRIES; i++) {
6800959e168SJanosch Frank 			if (test_bit(i, bitmap))
6810959e168SJanosch Frank 				mark_page_dirty(kvm, cur_gfn + i);
6820959e168SJanosch Frank 		}
6830959e168SJanosch Frank 
6841763f8d0SChristian Borntraeger 		if (fatal_signal_pending(current))
6851763f8d0SChristian Borntraeger 			return;
68670c88a00SChristian Borntraeger 		cond_resched();
68715f36ebdSJason J. Herne 	}
68815f36ebdSJason J. Herne }
68915f36ebdSJason J. Herne 
690b0c632dbSHeiko Carstens /* Section: vm related */
691a6e2f683SEugene (jno) Dvurechenski static void sca_del_vcpu(struct kvm_vcpu *vcpu);
692a6e2f683SEugene (jno) Dvurechenski 
693b0c632dbSHeiko Carstens /*
694b0c632dbSHeiko Carstens  * Get (and clear) the dirty memory log for a memory slot.
695b0c632dbSHeiko Carstens  */
696b0c632dbSHeiko Carstens int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
697b0c632dbSHeiko Carstens 			       struct kvm_dirty_log *log)
698b0c632dbSHeiko Carstens {
69915f36ebdSJason J. Herne 	int r;
70015f36ebdSJason J. Herne 	unsigned long n;
70115f36ebdSJason J. Herne 	struct kvm_memory_slot *memslot;
7022a49f61dSSean Christopherson 	int is_dirty;
70315f36ebdSJason J. Herne 
704e1e8a962SJanosch Frank 	if (kvm_is_ucontrol(kvm))
705e1e8a962SJanosch Frank 		return -EINVAL;
706e1e8a962SJanosch Frank 
70715f36ebdSJason J. Herne 	mutex_lock(&kvm->slots_lock);
70815f36ebdSJason J. Herne 
70915f36ebdSJason J. Herne 	r = -EINVAL;
71015f36ebdSJason J. Herne 	if (log->slot >= KVM_USER_MEM_SLOTS)
71115f36ebdSJason J. Herne 		goto out;
71215f36ebdSJason J. Herne 
7132a49f61dSSean Christopherson 	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
71415f36ebdSJason J. Herne 	if (r)
71515f36ebdSJason J. Herne 		goto out;
71615f36ebdSJason J. Herne 
71715f36ebdSJason J. Herne 	/* Clear the dirty log */
71815f36ebdSJason J. Herne 	if (is_dirty) {
71915f36ebdSJason J. Herne 		n = kvm_dirty_bitmap_bytes(memslot);
72015f36ebdSJason J. Herne 		memset(memslot->dirty_bitmap, 0, n);
72115f36ebdSJason J. Herne 	}
72215f36ebdSJason J. Herne 	r = 0;
72315f36ebdSJason J. Herne out:
72415f36ebdSJason J. Herne 	mutex_unlock(&kvm->slots_lock);
72515f36ebdSJason J. Herne 	return r;
726b0c632dbSHeiko Carstens }
727b0c632dbSHeiko Carstens 
7286502a34cSDavid Hildenbrand static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
7296502a34cSDavid Hildenbrand {
73046808a4cSMarc Zyngier 	unsigned long i;
7316502a34cSDavid Hildenbrand 	struct kvm_vcpu *vcpu;
7326502a34cSDavid Hildenbrand 
7336502a34cSDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
7346502a34cSDavid Hildenbrand 		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
7356502a34cSDavid Hildenbrand 	}
7366502a34cSDavid Hildenbrand }
7376502a34cSDavid Hildenbrand 
738e5d83c74SPaolo Bonzini int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
739d938dc55SCornelia Huck {
740d938dc55SCornelia Huck 	int r;
741d938dc55SCornelia Huck 
742d938dc55SCornelia Huck 	if (cap->flags)
743d938dc55SCornelia Huck 		return -EINVAL;
744d938dc55SCornelia Huck 
745d938dc55SCornelia Huck 	switch (cap->cap) {
74684223598SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
747c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
74884223598SCornelia Huck 		kvm->arch.use_irqchip = 1;
74984223598SCornelia Huck 		r = 0;
75084223598SCornelia Huck 		break;
7512444b352SDavid Hildenbrand 	case KVM_CAP_S390_USER_SIGP:
752c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
7532444b352SDavid Hildenbrand 		kvm->arch.user_sigp = 1;
7542444b352SDavid Hildenbrand 		r = 0;
7552444b352SDavid Hildenbrand 		break;
75668c55750SEric Farman 	case KVM_CAP_S390_VECTOR_REGISTERS:
7575967c17bSDavid Hildenbrand 		mutex_lock(&kvm->lock);
758a03825bbSPaolo Bonzini 		if (kvm->created_vcpus) {
7595967c17bSDavid Hildenbrand 			r = -EBUSY;
7605967c17bSDavid Hildenbrand 		} else if (MACHINE_HAS_VX) {
761c54f0d6aSDavid Hildenbrand 			set_kvm_facility(kvm->arch.model.fac_mask, 129);
762c54f0d6aSDavid Hildenbrand 			set_kvm_facility(kvm->arch.model.fac_list, 129);
7632f87d942SGuenther Hutzl 			if (test_facility(134)) {
7642f87d942SGuenther Hutzl 				set_kvm_facility(kvm->arch.model.fac_mask, 134);
7652f87d942SGuenther Hutzl 				set_kvm_facility(kvm->arch.model.fac_list, 134);
7662f87d942SGuenther Hutzl 			}
76753743aa7SMaxim Samoylov 			if (test_facility(135)) {
76853743aa7SMaxim Samoylov 				set_kvm_facility(kvm->arch.model.fac_mask, 135);
76953743aa7SMaxim Samoylov 				set_kvm_facility(kvm->arch.model.fac_list, 135);
77053743aa7SMaxim Samoylov 			}
7717832e91cSChristian Borntraeger 			if (test_facility(148)) {
7727832e91cSChristian Borntraeger 				set_kvm_facility(kvm->arch.model.fac_mask, 148);
7737832e91cSChristian Borntraeger 				set_kvm_facility(kvm->arch.model.fac_list, 148);
7747832e91cSChristian Borntraeger 			}
775d5cb6ab1SChristian Borntraeger 			if (test_facility(152)) {
776d5cb6ab1SChristian Borntraeger 				set_kvm_facility(kvm->arch.model.fac_mask, 152);
777d5cb6ab1SChristian Borntraeger 				set_kvm_facility(kvm->arch.model.fac_list, 152);
778d5cb6ab1SChristian Borntraeger 			}
7791f703d2cSChristian Borntraeger 			if (test_facility(192)) {
7801f703d2cSChristian Borntraeger 				set_kvm_facility(kvm->arch.model.fac_mask, 192);
7811f703d2cSChristian Borntraeger 				set_kvm_facility(kvm->arch.model.fac_list, 192);
7821f703d2cSChristian Borntraeger 			}
78318280d8bSMichael Mueller 			r = 0;
78418280d8bSMichael Mueller 		} else
78518280d8bSMichael Mueller 			r = -EINVAL;
7865967c17bSDavid Hildenbrand 		mutex_unlock(&kvm->lock);
787c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
788c92ea7b9SChristian Borntraeger 			 r ? "(not available)" : "(success)");
78968c55750SEric Farman 		break;
790c6e5f166SFan Zhang 	case KVM_CAP_S390_RI:
791c6e5f166SFan Zhang 		r = -EINVAL;
792c6e5f166SFan Zhang 		mutex_lock(&kvm->lock);
793a03825bbSPaolo Bonzini 		if (kvm->created_vcpus) {
794c6e5f166SFan Zhang 			r = -EBUSY;
795c6e5f166SFan Zhang 		} else if (test_facility(64)) {
796c54f0d6aSDavid Hildenbrand 			set_kvm_facility(kvm->arch.model.fac_mask, 64);
797c54f0d6aSDavid Hildenbrand 			set_kvm_facility(kvm->arch.model.fac_list, 64);
798c6e5f166SFan Zhang 			r = 0;
799c6e5f166SFan Zhang 		}
800c6e5f166SFan Zhang 		mutex_unlock(&kvm->lock);
801c6e5f166SFan Zhang 		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
802c6e5f166SFan Zhang 			 r ? "(not available)" : "(success)");
803c6e5f166SFan Zhang 		break;
80447a4693eSYi Min Zhao 	case KVM_CAP_S390_AIS:
80547a4693eSYi Min Zhao 		mutex_lock(&kvm->lock);
80647a4693eSYi Min Zhao 		if (kvm->created_vcpus) {
80747a4693eSYi Min Zhao 			r = -EBUSY;
80847a4693eSYi Min Zhao 		} else {
80947a4693eSYi Min Zhao 			set_kvm_facility(kvm->arch.model.fac_mask, 72);
81047a4693eSYi Min Zhao 			set_kvm_facility(kvm->arch.model.fac_list, 72);
81147a4693eSYi Min Zhao 			r = 0;
81247a4693eSYi Min Zhao 		}
81347a4693eSYi Min Zhao 		mutex_unlock(&kvm->lock);
81447a4693eSYi Min Zhao 		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
81547a4693eSYi Min Zhao 			 r ? "(not available)" : "(success)");
81647a4693eSYi Min Zhao 		break;
8174e0b1ab7SFan Zhang 	case KVM_CAP_S390_GS:
8184e0b1ab7SFan Zhang 		r = -EINVAL;
8194e0b1ab7SFan Zhang 		mutex_lock(&kvm->lock);
820241e3ec0SChristian Borntraeger 		if (kvm->created_vcpus) {
8214e0b1ab7SFan Zhang 			r = -EBUSY;
8224e0b1ab7SFan Zhang 		} else if (test_facility(133)) {
8234e0b1ab7SFan Zhang 			set_kvm_facility(kvm->arch.model.fac_mask, 133);
8244e0b1ab7SFan Zhang 			set_kvm_facility(kvm->arch.model.fac_list, 133);
8254e0b1ab7SFan Zhang 			r = 0;
8264e0b1ab7SFan Zhang 		}
8274e0b1ab7SFan Zhang 		mutex_unlock(&kvm->lock);
8284e0b1ab7SFan Zhang 		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
8294e0b1ab7SFan Zhang 			 r ? "(not available)" : "(success)");
8304e0b1ab7SFan Zhang 		break;
831a4499382SJanosch Frank 	case KVM_CAP_S390_HPAGE_1M:
832a4499382SJanosch Frank 		mutex_lock(&kvm->lock);
833a4499382SJanosch Frank 		if (kvm->created_vcpus)
834a4499382SJanosch Frank 			r = -EBUSY;
83540ebdb8eSJanosch Frank 		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
836a4499382SJanosch Frank 			r = -EINVAL;
837a4499382SJanosch Frank 		else {
838a4499382SJanosch Frank 			r = 0;
839d8ed45c5SMichel Lespinasse 			mmap_write_lock(kvm->mm);
840a4499382SJanosch Frank 			kvm->mm->context.allow_gmap_hpage_1m = 1;
841d8ed45c5SMichel Lespinasse 			mmap_write_unlock(kvm->mm);
842a4499382SJanosch Frank 			/*
843a4499382SJanosch Frank 			 * We might have to create fake 4k page
844a4499382SJanosch Frank 			 * tables. To avoid that the hardware works on
845a4499382SJanosch Frank 			 * stale PGSTEs, we emulate these instructions.
846a4499382SJanosch Frank 			 */
847a4499382SJanosch Frank 			kvm->arch.use_skf = 0;
848a4499382SJanosch Frank 			kvm->arch.use_pfmfi = 0;
849a4499382SJanosch Frank 		}
850a4499382SJanosch Frank 		mutex_unlock(&kvm->lock);
851a4499382SJanosch Frank 		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
852a4499382SJanosch Frank 			 r ? "(not available)" : "(success)");
853a4499382SJanosch Frank 		break;
854e44fc8c9SEkaterina Tumanova 	case KVM_CAP_S390_USER_STSI:
855c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
856e44fc8c9SEkaterina Tumanova 		kvm->arch.user_stsi = 1;
857e44fc8c9SEkaterina Tumanova 		r = 0;
858e44fc8c9SEkaterina Tumanova 		break;
8596502a34cSDavid Hildenbrand 	case KVM_CAP_S390_USER_INSTR0:
8606502a34cSDavid Hildenbrand 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
8616502a34cSDavid Hildenbrand 		kvm->arch.user_instr0 = 1;
8626502a34cSDavid Hildenbrand 		icpt_operexc_on_all_vcpus(kvm);
8636502a34cSDavid Hildenbrand 		r = 0;
8646502a34cSDavid Hildenbrand 		break;
865f5ecfee9SPierre Morel 	case KVM_CAP_S390_CPU_TOPOLOGY:
866f5ecfee9SPierre Morel 		r = -EINVAL;
867f5ecfee9SPierre Morel 		mutex_lock(&kvm->lock);
868f5ecfee9SPierre Morel 		if (kvm->created_vcpus) {
869f5ecfee9SPierre Morel 			r = -EBUSY;
870f5ecfee9SPierre Morel 		} else if (test_facility(11)) {
871f5ecfee9SPierre Morel 			set_kvm_facility(kvm->arch.model.fac_mask, 11);
872f5ecfee9SPierre Morel 			set_kvm_facility(kvm->arch.model.fac_list, 11);
873f5ecfee9SPierre Morel 			r = 0;
874f5ecfee9SPierre Morel 		}
875f5ecfee9SPierre Morel 		mutex_unlock(&kvm->lock);
876f5ecfee9SPierre Morel 		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_CPU_TOPOLOGY %s",
877f5ecfee9SPierre Morel 			 r ? "(not available)" : "(success)");
878f5ecfee9SPierre Morel 		break;
879d938dc55SCornelia Huck 	default:
880d938dc55SCornelia Huck 		r = -EINVAL;
881d938dc55SCornelia Huck 		break;
882d938dc55SCornelia Huck 	}
883d938dc55SCornelia Huck 	return r;
884d938dc55SCornelia Huck }
885d938dc55SCornelia Huck 
8868c0a7ce6SDominik Dingel static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
8878c0a7ce6SDominik Dingel {
8888c0a7ce6SDominik Dingel 	int ret;
8898c0a7ce6SDominik Dingel 
8908c0a7ce6SDominik Dingel 	switch (attr->attr) {
8918c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE:
8928c0a7ce6SDominik Dingel 		ret = 0;
893c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
894a3a92c31SDominik Dingel 			 kvm->arch.mem_limit);
895a3a92c31SDominik Dingel 		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
8968c0a7ce6SDominik Dingel 			ret = -EFAULT;
8978c0a7ce6SDominik Dingel 		break;
8988c0a7ce6SDominik Dingel 	default:
8998c0a7ce6SDominik Dingel 		ret = -ENXIO;
9008c0a7ce6SDominik Dingel 		break;
9018c0a7ce6SDominik Dingel 	}
9028c0a7ce6SDominik Dingel 	return ret;
9038c0a7ce6SDominik Dingel }
9048c0a7ce6SDominik Dingel 
9058c0a7ce6SDominik Dingel static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
9064f718eabSDominik Dingel {
9074f718eabSDominik Dingel 	int ret;
9084f718eabSDominik Dingel 	unsigned int idx;
9094f718eabSDominik Dingel 	switch (attr->attr) {
9104f718eabSDominik Dingel 	case KVM_S390_VM_MEM_ENABLE_CMMA:
911f9cbd9b0SDavid Hildenbrand 		ret = -ENXIO;
912c24cc9c8SDavid Hildenbrand 		if (!sclp.has_cmma)
913e6db1d61SDominik Dingel 			break;
914e6db1d61SDominik Dingel 
915c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
9164f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
917a4499382SJanosch Frank 		if (kvm->created_vcpus)
918a4499382SJanosch Frank 			ret = -EBUSY;
919a4499382SJanosch Frank 		else if (kvm->mm->context.allow_gmap_hpage_1m)
920a4499382SJanosch Frank 			ret = -EINVAL;
921a4499382SJanosch Frank 		else {
9224f718eabSDominik Dingel 			kvm->arch.use_cmma = 1;
923c9f0a2b8SJanosch Frank 			/* Not compatible with cmma. */
924c9f0a2b8SJanosch Frank 			kvm->arch.use_pfmfi = 0;
9254f718eabSDominik Dingel 			ret = 0;
9264f718eabSDominik Dingel 		}
9274f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
9284f718eabSDominik Dingel 		break;
9294f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CLR_CMMA:
930f9cbd9b0SDavid Hildenbrand 		ret = -ENXIO;
931f9cbd9b0SDavid Hildenbrand 		if (!sclp.has_cmma)
932f9cbd9b0SDavid Hildenbrand 			break;
933c3489155SDominik Dingel 		ret = -EINVAL;
934c3489155SDominik Dingel 		if (!kvm->arch.use_cmma)
935c3489155SDominik Dingel 			break;
936c3489155SDominik Dingel 
937c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
9384f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
9394f718eabSDominik Dingel 		idx = srcu_read_lock(&kvm->srcu);
940a13cff31SDominik Dingel 		s390_reset_cmma(kvm->arch.gmap->mm);
9414f718eabSDominik Dingel 		srcu_read_unlock(&kvm->srcu, idx);
9424f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
9434f718eabSDominik Dingel 		ret = 0;
9444f718eabSDominik Dingel 		break;
9458c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE: {
9468c0a7ce6SDominik Dingel 		unsigned long new_limit;
9478c0a7ce6SDominik Dingel 
9488c0a7ce6SDominik Dingel 		if (kvm_is_ucontrol(kvm))
9498c0a7ce6SDominik Dingel 			return -EINVAL;
9508c0a7ce6SDominik Dingel 
9518c0a7ce6SDominik Dingel 		if (get_user(new_limit, (u64 __user *)attr->addr))
9528c0a7ce6SDominik Dingel 			return -EFAULT;
9538c0a7ce6SDominik Dingel 
954a3a92c31SDominik Dingel 		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
955a3a92c31SDominik Dingel 		    new_limit > kvm->arch.mem_limit)
9568c0a7ce6SDominik Dingel 			return -E2BIG;
9578c0a7ce6SDominik Dingel 
958a3a92c31SDominik Dingel 		if (!new_limit)
959a3a92c31SDominik Dingel 			return -EINVAL;
960a3a92c31SDominik Dingel 
9616ea427bbSMartin Schwidefsky 		/* gmap_create takes last usable address */
962a3a92c31SDominik Dingel 		if (new_limit != KVM_S390_NO_MEM_LIMIT)
963a3a92c31SDominik Dingel 			new_limit -= 1;
964a3a92c31SDominik Dingel 
9658c0a7ce6SDominik Dingel 		ret = -EBUSY;
9668c0a7ce6SDominik Dingel 		mutex_lock(&kvm->lock);
967a03825bbSPaolo Bonzini 		if (!kvm->created_vcpus) {
9686ea427bbSMartin Schwidefsky 			/* gmap_create will round the limit up */
9696ea427bbSMartin Schwidefsky 			struct gmap *new = gmap_create(current->mm, new_limit);
9708c0a7ce6SDominik Dingel 
9718c0a7ce6SDominik Dingel 			if (!new) {
9728c0a7ce6SDominik Dingel 				ret = -ENOMEM;
9738c0a7ce6SDominik Dingel 			} else {
9746ea427bbSMartin Schwidefsky 				gmap_remove(kvm->arch.gmap);
9758c0a7ce6SDominik Dingel 				new->private = kvm;
9768c0a7ce6SDominik Dingel 				kvm->arch.gmap = new;
9778c0a7ce6SDominik Dingel 				ret = 0;
9788c0a7ce6SDominik Dingel 			}
9798c0a7ce6SDominik Dingel 		}
9808c0a7ce6SDominik Dingel 		mutex_unlock(&kvm->lock);
981a3a92c31SDominik Dingel 		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
982a3a92c31SDominik Dingel 		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
983a3a92c31SDominik Dingel 			 (void *) kvm->arch.gmap->asce);
9848c0a7ce6SDominik Dingel 		break;
9858c0a7ce6SDominik Dingel 	}
9864f718eabSDominik Dingel 	default:
9874f718eabSDominik Dingel 		ret = -ENXIO;
9884f718eabSDominik Dingel 		break;
9894f718eabSDominik Dingel 	}
9904f718eabSDominik Dingel 	return ret;
9914f718eabSDominik Dingel }
9924f718eabSDominik Dingel 
993a374e892STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
994a374e892STony Krowiak 
99520c922f0STony Krowiak void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
996a374e892STony Krowiak {
997a374e892STony Krowiak 	struct kvm_vcpu *vcpu;
99846808a4cSMarc Zyngier 	unsigned long i;
999a374e892STony Krowiak 
100020c922f0STony Krowiak 	kvm_s390_vcpu_block_all(kvm);
100120c922f0STony Krowiak 
10023194cdb7SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
100320c922f0STony Krowiak 		kvm_s390_vcpu_crypto_setup(vcpu);
10043194cdb7SDavid Hildenbrand 		/* recreate the shadow crycb by leaving the VSIE handler */
10053194cdb7SDavid Hildenbrand 		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
10063194cdb7SDavid Hildenbrand 	}
100720c922f0STony Krowiak 
100820c922f0STony Krowiak 	kvm_s390_vcpu_unblock_all(kvm);
100920c922f0STony Krowiak }
101020c922f0STony Krowiak 
101120c922f0STony Krowiak static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
101220c922f0STony Krowiak {
1013a374e892STony Krowiak 	mutex_lock(&kvm->lock);
1014a374e892STony Krowiak 	switch (attr->attr) {
1015a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
10168e41bd54SChristian Borntraeger 		if (!test_kvm_facility(kvm, 76)) {
10178e41bd54SChristian Borntraeger 			mutex_unlock(&kvm->lock);
101837940fb0STony Krowiak 			return -EINVAL;
10198e41bd54SChristian Borntraeger 		}
1020a374e892STony Krowiak 		get_random_bytes(
1021a374e892STony Krowiak 			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1022a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1023a374e892STony Krowiak 		kvm->arch.crypto.aes_kw = 1;
1024c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
1025a374e892STony Krowiak 		break;
1026a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
10278e41bd54SChristian Borntraeger 		if (!test_kvm_facility(kvm, 76)) {
10288e41bd54SChristian Borntraeger 			mutex_unlock(&kvm->lock);
102937940fb0STony Krowiak 			return -EINVAL;
10308e41bd54SChristian Borntraeger 		}
1031a374e892STony Krowiak 		get_random_bytes(
1032a374e892STony Krowiak 			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1033a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
1034a374e892STony Krowiak 		kvm->arch.crypto.dea_kw = 1;
1035c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
1036a374e892STony Krowiak 		break;
1037a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
10388e41bd54SChristian Borntraeger 		if (!test_kvm_facility(kvm, 76)) {
10398e41bd54SChristian Borntraeger 			mutex_unlock(&kvm->lock);
104037940fb0STony Krowiak 			return -EINVAL;
10418e41bd54SChristian Borntraeger 		}
1042a374e892STony Krowiak 		kvm->arch.crypto.aes_kw = 0;
1043a374e892STony Krowiak 		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
1044a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1045c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
1046a374e892STony Krowiak 		break;
1047a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
10488e41bd54SChristian Borntraeger 		if (!test_kvm_facility(kvm, 76)) {
10498e41bd54SChristian Borntraeger 			mutex_unlock(&kvm->lock);
105037940fb0STony Krowiak 			return -EINVAL;
10518e41bd54SChristian Borntraeger 		}
1052a374e892STony Krowiak 		kvm->arch.crypto.dea_kw = 0;
1053a374e892STony Krowiak 		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
1054a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
1055c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
1056a374e892STony Krowiak 		break;
105737940fb0STony Krowiak 	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
105837940fb0STony Krowiak 		if (!ap_instructions_available()) {
105937940fb0STony Krowiak 			mutex_unlock(&kvm->lock);
106037940fb0STony Krowiak 			return -EOPNOTSUPP;
106137940fb0STony Krowiak 		}
106237940fb0STony Krowiak 		kvm->arch.crypto.apie = 1;
106337940fb0STony Krowiak 		break;
106437940fb0STony Krowiak 	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
106537940fb0STony Krowiak 		if (!ap_instructions_available()) {
106637940fb0STony Krowiak 			mutex_unlock(&kvm->lock);
106737940fb0STony Krowiak 			return -EOPNOTSUPP;
106837940fb0STony Krowiak 		}
106937940fb0STony Krowiak 		kvm->arch.crypto.apie = 0;
107037940fb0STony Krowiak 		break;
1071a374e892STony Krowiak 	default:
1072a374e892STony Krowiak 		mutex_unlock(&kvm->lock);
1073a374e892STony Krowiak 		return -ENXIO;
1074a374e892STony Krowiak 	}
1075a374e892STony Krowiak 
107620c922f0STony Krowiak 	kvm_s390_vcpu_crypto_reset_all(kvm);
1077a374e892STony Krowiak 	mutex_unlock(&kvm->lock);
1078a374e892STony Krowiak 	return 0;
1079a374e892STony Krowiak }
1080a374e892STony Krowiak 
10813f4bbb43SMatthew Rosato static void kvm_s390_vcpu_pci_setup(struct kvm_vcpu *vcpu)
10823f4bbb43SMatthew Rosato {
10833f4bbb43SMatthew Rosato 	/* Only set the ECB bits after guest requests zPCI interpretation */
10843f4bbb43SMatthew Rosato 	if (!vcpu->kvm->arch.use_zpci_interp)
10853f4bbb43SMatthew Rosato 		return;
10863f4bbb43SMatthew Rosato 
10873f4bbb43SMatthew Rosato 	vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI;
10883f4bbb43SMatthew Rosato 	vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI;
10893f4bbb43SMatthew Rosato }
10903f4bbb43SMatthew Rosato 
10913f4bbb43SMatthew Rosato void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm)
10923f4bbb43SMatthew Rosato {
10933f4bbb43SMatthew Rosato 	struct kvm_vcpu *vcpu;
10943f4bbb43SMatthew Rosato 	unsigned long i;
10953f4bbb43SMatthew Rosato 
10963f4bbb43SMatthew Rosato 	lockdep_assert_held(&kvm->lock);
10973f4bbb43SMatthew Rosato 
10983f4bbb43SMatthew Rosato 	if (!kvm_s390_pci_interp_allowed())
10993f4bbb43SMatthew Rosato 		return;
11003f4bbb43SMatthew Rosato 
11013f4bbb43SMatthew Rosato 	/*
11023f4bbb43SMatthew Rosato 	 * If host is configured for PCI and the necessary facilities are
11033f4bbb43SMatthew Rosato 	 * available, turn on interpretation for the life of this guest
11043f4bbb43SMatthew Rosato 	 */
11053f4bbb43SMatthew Rosato 	kvm->arch.use_zpci_interp = 1;
11063f4bbb43SMatthew Rosato 
11073f4bbb43SMatthew Rosato 	kvm_s390_vcpu_block_all(kvm);
11083f4bbb43SMatthew Rosato 
11093f4bbb43SMatthew Rosato 	kvm_for_each_vcpu(i, vcpu, kvm) {
11103f4bbb43SMatthew Rosato 		kvm_s390_vcpu_pci_setup(vcpu);
11113f4bbb43SMatthew Rosato 		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
11123f4bbb43SMatthew Rosato 	}
11133f4bbb43SMatthew Rosato 
11143f4bbb43SMatthew Rosato 	kvm_s390_vcpu_unblock_all(kvm);
11153f4bbb43SMatthew Rosato }
11163f4bbb43SMatthew Rosato 
1117190df4a2SClaudio Imbrenda static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
1118190df4a2SClaudio Imbrenda {
111946808a4cSMarc Zyngier 	unsigned long cx;
1120190df4a2SClaudio Imbrenda 	struct kvm_vcpu *vcpu;
1121190df4a2SClaudio Imbrenda 
1122190df4a2SClaudio Imbrenda 	kvm_for_each_vcpu(cx, vcpu, kvm)
1123190df4a2SClaudio Imbrenda 		kvm_s390_sync_request(req, vcpu);
1124190df4a2SClaudio Imbrenda }
1125190df4a2SClaudio Imbrenda 
1126190df4a2SClaudio Imbrenda /*
1127190df4a2SClaudio Imbrenda  * Must be called with kvm->srcu held to avoid races on memslots, and with
11281de1ea7eSChristian Borntraeger  * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
1129190df4a2SClaudio Imbrenda  */
1130190df4a2SClaudio Imbrenda static int kvm_s390_vm_start_migration(struct kvm *kvm)
1131190df4a2SClaudio Imbrenda {
1132190df4a2SClaudio Imbrenda 	struct kvm_memory_slot *ms;
1133190df4a2SClaudio Imbrenda 	struct kvm_memslots *slots;
1134afdad616SClaudio Imbrenda 	unsigned long ram_pages = 0;
1135a54d8066SMaciej S. Szmigiero 	int bkt;
1136190df4a2SClaudio Imbrenda 
1137190df4a2SClaudio Imbrenda 	/* migration mode already enabled */
1138afdad616SClaudio Imbrenda 	if (kvm->arch.migration_mode)
1139190df4a2SClaudio Imbrenda 		return 0;
1140190df4a2SClaudio Imbrenda 	slots = kvm_memslots(kvm);
1141a54d8066SMaciej S. Szmigiero 	if (!slots || kvm_memslots_empty(slots))
1142190df4a2SClaudio Imbrenda 		return -EINVAL;
1143190df4a2SClaudio Imbrenda 
1144afdad616SClaudio Imbrenda 	if (!kvm->arch.use_cmma) {
1145afdad616SClaudio Imbrenda 		kvm->arch.migration_mode = 1;
1146afdad616SClaudio Imbrenda 		return 0;
1147190df4a2SClaudio Imbrenda 	}
1148190df4a2SClaudio Imbrenda 	/* mark all the pages in active slots as dirty */
1149a54d8066SMaciej S. Szmigiero 	kvm_for_each_memslot(ms, bkt, slots) {
115013a17cc0SIgor Mammedov 		if (!ms->dirty_bitmap)
115113a17cc0SIgor Mammedov 			return -EINVAL;
1152afdad616SClaudio Imbrenda 		/*
1153afdad616SClaudio Imbrenda 		 * The second half of the bitmap is only used on x86,
1154afdad616SClaudio Imbrenda 		 * and would be wasted otherwise, so we put it to good
1155afdad616SClaudio Imbrenda 		 * use here to keep track of the state of the storage
1156afdad616SClaudio Imbrenda 		 * attributes.
1157afdad616SClaudio Imbrenda 		 */
1158afdad616SClaudio Imbrenda 		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
1159afdad616SClaudio Imbrenda 		ram_pages += ms->npages;
1160190df4a2SClaudio Imbrenda 	}
1161afdad616SClaudio Imbrenda 	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
1162afdad616SClaudio Imbrenda 	kvm->arch.migration_mode = 1;
1163190df4a2SClaudio Imbrenda 	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
1164190df4a2SClaudio Imbrenda 	return 0;
1165190df4a2SClaudio Imbrenda }
1166190df4a2SClaudio Imbrenda 
1167190df4a2SClaudio Imbrenda /*
11681de1ea7eSChristian Borntraeger  * Must be called with kvm->slots_lock to avoid races with ourselves and
1169190df4a2SClaudio Imbrenda  * kvm_s390_vm_start_migration.
1170190df4a2SClaudio Imbrenda  */
1171190df4a2SClaudio Imbrenda static int kvm_s390_vm_stop_migration(struct kvm *kvm)
1172190df4a2SClaudio Imbrenda {
1173190df4a2SClaudio Imbrenda 	/* migration mode already disabled */
1174afdad616SClaudio Imbrenda 	if (!kvm->arch.migration_mode)
1175190df4a2SClaudio Imbrenda 		return 0;
1176afdad616SClaudio Imbrenda 	kvm->arch.migration_mode = 0;
1177afdad616SClaudio Imbrenda 	if (kvm->arch.use_cmma)
1178190df4a2SClaudio Imbrenda 		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
1179190df4a2SClaudio Imbrenda 	return 0;
1180190df4a2SClaudio Imbrenda }
1181190df4a2SClaudio Imbrenda 
1182190df4a2SClaudio Imbrenda static int kvm_s390_vm_set_migration(struct kvm *kvm,
1183190df4a2SClaudio Imbrenda 				     struct kvm_device_attr *attr)
1184190df4a2SClaudio Imbrenda {
11851de1ea7eSChristian Borntraeger 	int res = -ENXIO;
1186190df4a2SClaudio Imbrenda 
11871de1ea7eSChristian Borntraeger 	mutex_lock(&kvm->slots_lock);
1188190df4a2SClaudio Imbrenda 	switch (attr->attr) {
1189190df4a2SClaudio Imbrenda 	case KVM_S390_VM_MIGRATION_START:
1190190df4a2SClaudio Imbrenda 		res = kvm_s390_vm_start_migration(kvm);
1191190df4a2SClaudio Imbrenda 		break;
1192190df4a2SClaudio Imbrenda 	case KVM_S390_VM_MIGRATION_STOP:
1193190df4a2SClaudio Imbrenda 		res = kvm_s390_vm_stop_migration(kvm);
1194190df4a2SClaudio Imbrenda 		break;
1195190df4a2SClaudio Imbrenda 	default:
1196190df4a2SClaudio Imbrenda 		break;
1197190df4a2SClaudio Imbrenda 	}
11981de1ea7eSChristian Borntraeger 	mutex_unlock(&kvm->slots_lock);
1199190df4a2SClaudio Imbrenda 
1200190df4a2SClaudio Imbrenda 	return res;
1201190df4a2SClaudio Imbrenda }
1202190df4a2SClaudio Imbrenda 
1203190df4a2SClaudio Imbrenda static int kvm_s390_vm_get_migration(struct kvm *kvm,
1204190df4a2SClaudio Imbrenda 				     struct kvm_device_attr *attr)
1205190df4a2SClaudio Imbrenda {
1206afdad616SClaudio Imbrenda 	u64 mig = kvm->arch.migration_mode;
1207190df4a2SClaudio Imbrenda 
1208190df4a2SClaudio Imbrenda 	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
1209190df4a2SClaudio Imbrenda 		return -ENXIO;
1210190df4a2SClaudio Imbrenda 
1211190df4a2SClaudio Imbrenda 	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
1212190df4a2SClaudio Imbrenda 		return -EFAULT;
1213190df4a2SClaudio Imbrenda 	return 0;
1214190df4a2SClaudio Imbrenda }
1215190df4a2SClaudio Imbrenda 
12166973091dSNico Boehr static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
12176973091dSNico Boehr 
12188fa1696eSCollin L. Walling static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
12198fa1696eSCollin L. Walling {
12208fa1696eSCollin L. Walling 	struct kvm_s390_vm_tod_clock gtod;
12218fa1696eSCollin L. Walling 
12228fa1696eSCollin L. Walling 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
12238fa1696eSCollin L. Walling 		return -EFAULT;
12248fa1696eSCollin L. Walling 
12250e7def5fSDavid Hildenbrand 	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
12268fa1696eSCollin L. Walling 		return -EINVAL;
12276973091dSNico Boehr 	__kvm_s390_set_tod_clock(kvm, &gtod);
12288fa1696eSCollin L. Walling 
12298fa1696eSCollin L. Walling 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
12308fa1696eSCollin L. Walling 		gtod.epoch_idx, gtod.tod);
12318fa1696eSCollin L. Walling 
12328fa1696eSCollin L. Walling 	return 0;
12338fa1696eSCollin L. Walling }
12348fa1696eSCollin L. Walling 
123572f25020SJason J. Herne static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
123672f25020SJason J. Herne {
123772f25020SJason J. Herne 	u8 gtod_high;
123872f25020SJason J. Herne 
123972f25020SJason J. Herne 	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
124072f25020SJason J. Herne 					   sizeof(gtod_high)))
124172f25020SJason J. Herne 		return -EFAULT;
124272f25020SJason J. Herne 
124372f25020SJason J. Herne 	if (gtod_high != 0)
124472f25020SJason J. Herne 		return -EINVAL;
124558c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
124672f25020SJason J. Herne 
124772f25020SJason J. Herne 	return 0;
124872f25020SJason J. Herne }
124972f25020SJason J. Herne 
125072f25020SJason J. Herne static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
125172f25020SJason J. Herne {
12520e7def5fSDavid Hildenbrand 	struct kvm_s390_vm_tod_clock gtod = { 0 };
125372f25020SJason J. Herne 
12540e7def5fSDavid Hildenbrand 	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
12550e7def5fSDavid Hildenbrand 			   sizeof(gtod.tod)))
125672f25020SJason J. Herne 		return -EFAULT;
125772f25020SJason J. Herne 
12586973091dSNico Boehr 	__kvm_s390_set_tod_clock(kvm, &gtod);
12590e7def5fSDavid Hildenbrand 	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
126072f25020SJason J. Herne 	return 0;
126172f25020SJason J. Herne }
126272f25020SJason J. Herne 
126372f25020SJason J. Herne static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
126472f25020SJason J. Herne {
126572f25020SJason J. Herne 	int ret;
126672f25020SJason J. Herne 
126772f25020SJason J. Herne 	if (attr->flags)
126872f25020SJason J. Herne 		return -EINVAL;
126972f25020SJason J. Herne 
12706973091dSNico Boehr 	mutex_lock(&kvm->lock);
12716973091dSNico Boehr 	/*
12726973091dSNico Boehr 	 * For protected guests, the TOD is managed by the ultravisor, so trying
12736973091dSNico Boehr 	 * to change it will never bring the expected results.
12746973091dSNico Boehr 	 */
12756973091dSNico Boehr 	if (kvm_s390_pv_is_protected(kvm)) {
12766973091dSNico Boehr 		ret = -EOPNOTSUPP;
12776973091dSNico Boehr 		goto out_unlock;
12786973091dSNico Boehr 	}
12796973091dSNico Boehr 
128072f25020SJason J. Herne 	switch (attr->attr) {
12818fa1696eSCollin L. Walling 	case KVM_S390_VM_TOD_EXT:
12828fa1696eSCollin L. Walling 		ret = kvm_s390_set_tod_ext(kvm, attr);
12838fa1696eSCollin L. Walling 		break;
128472f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
128572f25020SJason J. Herne 		ret = kvm_s390_set_tod_high(kvm, attr);
128672f25020SJason J. Herne 		break;
128772f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
128872f25020SJason J. Herne 		ret = kvm_s390_set_tod_low(kvm, attr);
128972f25020SJason J. Herne 		break;
129072f25020SJason J. Herne 	default:
129172f25020SJason J. Herne 		ret = -ENXIO;
129272f25020SJason J. Herne 		break;
129372f25020SJason J. Herne 	}
12946973091dSNico Boehr 
12956973091dSNico Boehr out_unlock:
12966973091dSNico Boehr 	mutex_unlock(&kvm->lock);
129772f25020SJason J. Herne 	return ret;
129872f25020SJason J. Herne }
129972f25020SJason J. Herne 
130033d1b272SDavid Hildenbrand static void kvm_s390_get_tod_clock(struct kvm *kvm,
13018fa1696eSCollin L. Walling 				   struct kvm_s390_vm_tod_clock *gtod)
13028fa1696eSCollin L. Walling {
13032cfd7b73SHeiko Carstens 	union tod_clock clk;
13048fa1696eSCollin L. Walling 
13058fa1696eSCollin L. Walling 	preempt_disable();
13068fa1696eSCollin L. Walling 
13072cfd7b73SHeiko Carstens 	store_tod_clock_ext(&clk);
13088fa1696eSCollin L. Walling 
13092cfd7b73SHeiko Carstens 	gtod->tod = clk.tod + kvm->arch.epoch;
131033d1b272SDavid Hildenbrand 	gtod->epoch_idx = 0;
131133d1b272SDavid Hildenbrand 	if (test_kvm_facility(kvm, 139)) {
13122cfd7b73SHeiko Carstens 		gtod->epoch_idx = clk.ei + kvm->arch.epdx;
13132cfd7b73SHeiko Carstens 		if (gtod->tod < clk.tod)
13148fa1696eSCollin L. Walling 			gtod->epoch_idx += 1;
131533d1b272SDavid Hildenbrand 	}
13168fa1696eSCollin L. Walling 
13178fa1696eSCollin L. Walling 	preempt_enable();
13188fa1696eSCollin L. Walling }
13198fa1696eSCollin L. Walling 
13208fa1696eSCollin L. Walling static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
13218fa1696eSCollin L. Walling {
13228fa1696eSCollin L. Walling 	struct kvm_s390_vm_tod_clock gtod;
13238fa1696eSCollin L. Walling 
13248fa1696eSCollin L. Walling 	memset(&gtod, 0, sizeof(gtod));
132533d1b272SDavid Hildenbrand 	kvm_s390_get_tod_clock(kvm, &gtod);
13268fa1696eSCollin L. Walling 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
13278fa1696eSCollin L. Walling 		return -EFAULT;
13288fa1696eSCollin L. Walling 
13298fa1696eSCollin L. Walling 	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
13308fa1696eSCollin L. Walling 		gtod.epoch_idx, gtod.tod);
13318fa1696eSCollin L. Walling 	return 0;
13328fa1696eSCollin L. Walling }
13338fa1696eSCollin L. Walling 
133472f25020SJason J. Herne static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
133572f25020SJason J. Herne {
133672f25020SJason J. Herne 	u8 gtod_high = 0;
133772f25020SJason J. Herne 
133872f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod_high,
133972f25020SJason J. Herne 					 sizeof(gtod_high)))
134072f25020SJason J. Herne 		return -EFAULT;
134158c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
134272f25020SJason J. Herne 
134372f25020SJason J. Herne 	return 0;
134472f25020SJason J. Herne }
134572f25020SJason J. Herne 
134672f25020SJason J. Herne static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
134772f25020SJason J. Herne {
13485a3d883aSDavid Hildenbrand 	u64 gtod;
134972f25020SJason J. Herne 
135060417fccSDavid Hildenbrand 	gtod = kvm_s390_get_tod_clock_fast(kvm);
135172f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
135272f25020SJason J. Herne 		return -EFAULT;
135358c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
135472f25020SJason J. Herne 
135572f25020SJason J. Herne 	return 0;
135672f25020SJason J. Herne }
135772f25020SJason J. Herne 
135872f25020SJason J. Herne static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
135972f25020SJason J. Herne {
136072f25020SJason J. Herne 	int ret;
136172f25020SJason J. Herne 
136272f25020SJason J. Herne 	if (attr->flags)
136372f25020SJason J. Herne 		return -EINVAL;
136472f25020SJason J. Herne 
136572f25020SJason J. Herne 	switch (attr->attr) {
13668fa1696eSCollin L. Walling 	case KVM_S390_VM_TOD_EXT:
13678fa1696eSCollin L. Walling 		ret = kvm_s390_get_tod_ext(kvm, attr);
13688fa1696eSCollin L. Walling 		break;
136972f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
137072f25020SJason J. Herne 		ret = kvm_s390_get_tod_high(kvm, attr);
137172f25020SJason J. Herne 		break;
137272f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
137372f25020SJason J. Herne 		ret = kvm_s390_get_tod_low(kvm, attr);
137472f25020SJason J. Herne 		break;
137572f25020SJason J. Herne 	default:
137672f25020SJason J. Herne 		ret = -ENXIO;
137772f25020SJason J. Herne 		break;
137872f25020SJason J. Herne 	}
137972f25020SJason J. Herne 	return ret;
138072f25020SJason J. Herne }
138172f25020SJason J. Herne 
1382658b6edaSMichael Mueller static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1383658b6edaSMichael Mueller {
1384658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_processor *proc;
1385053dd230SDavid Hildenbrand 	u16 lowest_ibc, unblocked_ibc;
1386658b6edaSMichael Mueller 	int ret = 0;
1387658b6edaSMichael Mueller 
1388658b6edaSMichael Mueller 	mutex_lock(&kvm->lock);
1389a03825bbSPaolo Bonzini 	if (kvm->created_vcpus) {
1390658b6edaSMichael Mueller 		ret = -EBUSY;
1391658b6edaSMichael Mueller 		goto out;
1392658b6edaSMichael Mueller 	}
1393c4196218SChristian Borntraeger 	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
1394658b6edaSMichael Mueller 	if (!proc) {
1395658b6edaSMichael Mueller 		ret = -ENOMEM;
1396658b6edaSMichael Mueller 		goto out;
1397658b6edaSMichael Mueller 	}
1398658b6edaSMichael Mueller 	if (!copy_from_user(proc, (void __user *)attr->addr,
1399658b6edaSMichael Mueller 			    sizeof(*proc))) {
14009bb0ec09SDavid Hildenbrand 		kvm->arch.model.cpuid = proc->cpuid;
1401053dd230SDavid Hildenbrand 		lowest_ibc = sclp.ibc >> 16 & 0xfff;
1402053dd230SDavid Hildenbrand 		unblocked_ibc = sclp.ibc & 0xfff;
14030487c44dSDavid Hildenbrand 		if (lowest_ibc && proc->ibc) {
1404053dd230SDavid Hildenbrand 			if (proc->ibc > unblocked_ibc)
1405053dd230SDavid Hildenbrand 				kvm->arch.model.ibc = unblocked_ibc;
1406053dd230SDavid Hildenbrand 			else if (proc->ibc < lowest_ibc)
1407053dd230SDavid Hildenbrand 				kvm->arch.model.ibc = lowest_ibc;
1408053dd230SDavid Hildenbrand 			else
1409658b6edaSMichael Mueller 				kvm->arch.model.ibc = proc->ibc;
1410053dd230SDavid Hildenbrand 		}
1411c54f0d6aSDavid Hildenbrand 		memcpy(kvm->arch.model.fac_list, proc->fac_list,
1412658b6edaSMichael Mueller 		       S390_ARCH_FAC_LIST_SIZE_BYTE);
1413a8c39dd7SChristian Borntraeger 		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1414a8c39dd7SChristian Borntraeger 			 kvm->arch.model.ibc,
1415a8c39dd7SChristian Borntraeger 			 kvm->arch.model.cpuid);
1416a8c39dd7SChristian Borntraeger 		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1417a8c39dd7SChristian Borntraeger 			 kvm->arch.model.fac_list[0],
1418a8c39dd7SChristian Borntraeger 			 kvm->arch.model.fac_list[1],
1419a8c39dd7SChristian Borntraeger 			 kvm->arch.model.fac_list[2]);
1420658b6edaSMichael Mueller 	} else
1421658b6edaSMichael Mueller 		ret = -EFAULT;
1422658b6edaSMichael Mueller 	kfree(proc);
1423658b6edaSMichael Mueller out:
1424658b6edaSMichael Mueller 	mutex_unlock(&kvm->lock);
1425658b6edaSMichael Mueller 	return ret;
1426658b6edaSMichael Mueller }
1427658b6edaSMichael Mueller 
142815c9705fSDavid Hildenbrand static int kvm_s390_set_processor_feat(struct kvm *kvm,
142915c9705fSDavid Hildenbrand 				       struct kvm_device_attr *attr)
143015c9705fSDavid Hildenbrand {
143115c9705fSDavid Hildenbrand 	struct kvm_s390_vm_cpu_feat data;
143215c9705fSDavid Hildenbrand 
143315c9705fSDavid Hildenbrand 	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
143415c9705fSDavid Hildenbrand 		return -EFAULT;
143515c9705fSDavid Hildenbrand 	if (!bitmap_subset((unsigned long *) data.feat,
143615c9705fSDavid Hildenbrand 			   kvm_s390_available_cpu_feat,
143715c9705fSDavid Hildenbrand 			   KVM_S390_VM_CPU_FEAT_NR_BITS))
143815c9705fSDavid Hildenbrand 		return -EINVAL;
143915c9705fSDavid Hildenbrand 
144015c9705fSDavid Hildenbrand 	mutex_lock(&kvm->lock);
14412f8311c9SChristian Borntraeger 	if (kvm->created_vcpus) {
14422f8311c9SChristian Borntraeger 		mutex_unlock(&kvm->lock);
14432f8311c9SChristian Borntraeger 		return -EBUSY;
14442f8311c9SChristian Borntraeger 	}
1445da0f8e95SYury Norov 	bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
144615c9705fSDavid Hildenbrand 	mutex_unlock(&kvm->lock);
14472f8311c9SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
14482f8311c9SChristian Borntraeger 			 data.feat[0],
14492f8311c9SChristian Borntraeger 			 data.feat[1],
14502f8311c9SChristian Borntraeger 			 data.feat[2]);
14512f8311c9SChristian Borntraeger 	return 0;
145215c9705fSDavid Hildenbrand }
145315c9705fSDavid Hildenbrand 
14540a763c78SDavid Hildenbrand static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
14550a763c78SDavid Hildenbrand 					  struct kvm_device_attr *attr)
14560a763c78SDavid Hildenbrand {
1457346fa2f8SChristian Borntraeger 	mutex_lock(&kvm->lock);
1458346fa2f8SChristian Borntraeger 	if (kvm->created_vcpus) {
1459346fa2f8SChristian Borntraeger 		mutex_unlock(&kvm->lock);
1460346fa2f8SChristian Borntraeger 		return -EBUSY;
1461346fa2f8SChristian Borntraeger 	}
1462346fa2f8SChristian Borntraeger 
1463346fa2f8SChristian Borntraeger 	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
1464346fa2f8SChristian Borntraeger 			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
1465346fa2f8SChristian Borntraeger 		mutex_unlock(&kvm->lock);
1466346fa2f8SChristian Borntraeger 		return -EFAULT;
1467346fa2f8SChristian Borntraeger 	}
1468346fa2f8SChristian Borntraeger 	mutex_unlock(&kvm->lock);
1469346fa2f8SChristian Borntraeger 
147011ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
147111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
147211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
147311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
147411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
147511ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
147611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
147711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
147811ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
147911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
148011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
148111ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
148211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
148311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
148411ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KM     subfunc 0x%16.16lx.%16.16lx",
148511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
148611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
148711ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
148811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
148911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
149011ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
149111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
149211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
149311ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
149411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
149511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
149611ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
149711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
149811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
149911ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
150011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
150111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
150211ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
150311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
150411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
150511ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
150611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
150711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
150811ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
150911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
151011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
151111ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
151211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
151311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
151413209ad0SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
151513209ad0SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
151613209ad0SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1517173aec2dSChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1518173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1519173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1520173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1521173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
15224f45b90eSChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
15234f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
15244f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
15254f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
15264f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
152711ba5961SChristian Borntraeger 
1528346fa2f8SChristian Borntraeger 	return 0;
15290a763c78SDavid Hildenbrand }
15300a763c78SDavid Hildenbrand 
1531658b6edaSMichael Mueller static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1532658b6edaSMichael Mueller {
1533658b6edaSMichael Mueller 	int ret = -ENXIO;
1534658b6edaSMichael Mueller 
1535658b6edaSMichael Mueller 	switch (attr->attr) {
1536658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
1537658b6edaSMichael Mueller 		ret = kvm_s390_set_processor(kvm, attr);
1538658b6edaSMichael Mueller 		break;
153915c9705fSDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
154015c9705fSDavid Hildenbrand 		ret = kvm_s390_set_processor_feat(kvm, attr);
154115c9705fSDavid Hildenbrand 		break;
15420a763c78SDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
15430a763c78SDavid Hildenbrand 		ret = kvm_s390_set_processor_subfunc(kvm, attr);
15440a763c78SDavid Hildenbrand 		break;
1545658b6edaSMichael Mueller 	}
1546658b6edaSMichael Mueller 	return ret;
1547658b6edaSMichael Mueller }
1548658b6edaSMichael Mueller 
1549658b6edaSMichael Mueller static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1550658b6edaSMichael Mueller {
1551658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_processor *proc;
1552658b6edaSMichael Mueller 	int ret = 0;
1553658b6edaSMichael Mueller 
1554c4196218SChristian Borntraeger 	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
1555658b6edaSMichael Mueller 	if (!proc) {
1556658b6edaSMichael Mueller 		ret = -ENOMEM;
1557658b6edaSMichael Mueller 		goto out;
1558658b6edaSMichael Mueller 	}
15599bb0ec09SDavid Hildenbrand 	proc->cpuid = kvm->arch.model.cpuid;
1560658b6edaSMichael Mueller 	proc->ibc = kvm->arch.model.ibc;
1561c54f0d6aSDavid Hildenbrand 	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1562c54f0d6aSDavid Hildenbrand 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
1563a8c39dd7SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1564a8c39dd7SChristian Borntraeger 		 kvm->arch.model.ibc,
1565a8c39dd7SChristian Borntraeger 		 kvm->arch.model.cpuid);
1566a8c39dd7SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1567a8c39dd7SChristian Borntraeger 		 kvm->arch.model.fac_list[0],
1568a8c39dd7SChristian Borntraeger 		 kvm->arch.model.fac_list[1],
1569a8c39dd7SChristian Borntraeger 		 kvm->arch.model.fac_list[2]);
1570658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1571658b6edaSMichael Mueller 		ret = -EFAULT;
1572658b6edaSMichael Mueller 	kfree(proc);
1573658b6edaSMichael Mueller out:
1574658b6edaSMichael Mueller 	return ret;
1575658b6edaSMichael Mueller }
1576658b6edaSMichael Mueller 
1577658b6edaSMichael Mueller static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1578658b6edaSMichael Mueller {
1579658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_machine *mach;
1580658b6edaSMichael Mueller 	int ret = 0;
1581658b6edaSMichael Mueller 
1582c4196218SChristian Borntraeger 	mach = kzalloc(sizeof(*mach), GFP_KERNEL_ACCOUNT);
1583658b6edaSMichael Mueller 	if (!mach) {
1584658b6edaSMichael Mueller 		ret = -ENOMEM;
1585658b6edaSMichael Mueller 		goto out;
1586658b6edaSMichael Mueller 	}
1587658b6edaSMichael Mueller 	get_cpu_id((struct cpuid *) &mach->cpuid);
158837c5f6c8SDavid Hildenbrand 	mach->ibc = sclp.ibc;
1589c54f0d6aSDavid Hildenbrand 	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
1590981467c9SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
159117e89e13SSven Schnelle 	memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,
159217e89e13SSven Schnelle 	       sizeof(stfle_fac_list));
1593a8c39dd7SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host ibc:  0x%4.4x, host cpuid:  0x%16.16llx",
1594a8c39dd7SChristian Borntraeger 		 kvm->arch.model.ibc,
1595a8c39dd7SChristian Borntraeger 		 kvm->arch.model.cpuid);
1596a8c39dd7SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host facmask:  0x%16.16llx.%16.16llx.%16.16llx",
1597a8c39dd7SChristian Borntraeger 		 mach->fac_mask[0],
1598a8c39dd7SChristian Borntraeger 		 mach->fac_mask[1],
1599a8c39dd7SChristian Borntraeger 		 mach->fac_mask[2]);
1600a8c39dd7SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host faclist:  0x%16.16llx.%16.16llx.%16.16llx",
1601a8c39dd7SChristian Borntraeger 		 mach->fac_list[0],
1602a8c39dd7SChristian Borntraeger 		 mach->fac_list[1],
1603a8c39dd7SChristian Borntraeger 		 mach->fac_list[2]);
1604658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1605658b6edaSMichael Mueller 		ret = -EFAULT;
1606658b6edaSMichael Mueller 	kfree(mach);
1607658b6edaSMichael Mueller out:
1608658b6edaSMichael Mueller 	return ret;
1609658b6edaSMichael Mueller }
1610658b6edaSMichael Mueller 
161115c9705fSDavid Hildenbrand static int kvm_s390_get_processor_feat(struct kvm *kvm,
161215c9705fSDavid Hildenbrand 				       struct kvm_device_attr *attr)
161315c9705fSDavid Hildenbrand {
161415c9705fSDavid Hildenbrand 	struct kvm_s390_vm_cpu_feat data;
161515c9705fSDavid Hildenbrand 
1616da0f8e95SYury Norov 	bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
161715c9705fSDavid Hildenbrand 	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
161815c9705fSDavid Hildenbrand 		return -EFAULT;
16192f8311c9SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
16202f8311c9SChristian Borntraeger 			 data.feat[0],
16212f8311c9SChristian Borntraeger 			 data.feat[1],
16222f8311c9SChristian Borntraeger 			 data.feat[2]);
162315c9705fSDavid Hildenbrand 	return 0;
162415c9705fSDavid Hildenbrand }
162515c9705fSDavid Hildenbrand 
162615c9705fSDavid Hildenbrand static int kvm_s390_get_machine_feat(struct kvm *kvm,
162715c9705fSDavid Hildenbrand 				     struct kvm_device_attr *attr)
162815c9705fSDavid Hildenbrand {
162915c9705fSDavid Hildenbrand 	struct kvm_s390_vm_cpu_feat data;
163015c9705fSDavid Hildenbrand 
1631da0f8e95SYury Norov 	bitmap_to_arr64(data.feat, kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
163215c9705fSDavid Hildenbrand 	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
163315c9705fSDavid Hildenbrand 		return -EFAULT;
16342f8311c9SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host feat:  0x%16.16llx.0x%16.16llx.0x%16.16llx",
16352f8311c9SChristian Borntraeger 			 data.feat[0],
16362f8311c9SChristian Borntraeger 			 data.feat[1],
16372f8311c9SChristian Borntraeger 			 data.feat[2]);
163815c9705fSDavid Hildenbrand 	return 0;
163915c9705fSDavid Hildenbrand }
164015c9705fSDavid Hildenbrand 
16410a763c78SDavid Hildenbrand static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
16420a763c78SDavid Hildenbrand 					  struct kvm_device_attr *attr)
16430a763c78SDavid Hildenbrand {
1644346fa2f8SChristian Borntraeger 	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
1645346fa2f8SChristian Borntraeger 	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
1646346fa2f8SChristian Borntraeger 		return -EFAULT;
1647346fa2f8SChristian Borntraeger 
164811ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
164911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
165011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
165111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
165211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
165311ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
165411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
165511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
165611ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
165711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
165811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
165911ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
166011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
166111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
166211ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KM     subfunc 0x%16.16lx.%16.16lx",
166311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
166411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
166511ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
166611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
166711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
166811ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
166911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
167011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
167111ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
167211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
167311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
167411ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
167511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
167611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
167711ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
167811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
167911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
168011ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
168111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
168211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
168311ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
168411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
168511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
168611ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
168711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
168811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
168911ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
169011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
169111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
169213209ad0SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
169313209ad0SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
169413209ad0SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1695173aec2dSChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1696173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1697173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1698173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1699173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
17004f45b90eSChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
17014f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
17024f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
17034f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
17044f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
170511ba5961SChristian Borntraeger 
1706346fa2f8SChristian Borntraeger 	return 0;
17070a763c78SDavid Hildenbrand }
17080a763c78SDavid Hildenbrand 
17090a763c78SDavid Hildenbrand static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
17100a763c78SDavid Hildenbrand 					struct kvm_device_attr *attr)
17110a763c78SDavid Hildenbrand {
17120a763c78SDavid Hildenbrand 	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
17130a763c78SDavid Hildenbrand 	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
17140a763c78SDavid Hildenbrand 		return -EFAULT;
171511ba5961SChristian Borntraeger 
171611ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
171711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
171811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
171911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
172011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
172111ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  PTFF   subfunc 0x%16.16lx.%16.16lx",
172211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
172311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
172411ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KMAC   subfunc 0x%16.16lx.%16.16lx",
172511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
172611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
172711ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KMC    subfunc 0x%16.16lx.%16.16lx",
172811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
172911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
173011ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KM     subfunc 0x%16.16lx.%16.16lx",
173111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
173211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
173311ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KIMD   subfunc 0x%16.16lx.%16.16lx",
173411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
173511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
173611ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KLMD   subfunc 0x%16.16lx.%16.16lx",
173711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
173811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
173911ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  PCKMO  subfunc 0x%16.16lx.%16.16lx",
174011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
174111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
174211ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KMCTR  subfunc 0x%16.16lx.%16.16lx",
174311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
174411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
174511ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KMF    subfunc 0x%16.16lx.%16.16lx",
174611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
174711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
174811ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KMO    subfunc 0x%16.16lx.%16.16lx",
174911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
175011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
175111ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  PCC    subfunc 0x%16.16lx.%16.16lx",
175211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
175311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
175411ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  PPNO   subfunc 0x%16.16lx.%16.16lx",
175511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
175611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
175711ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KMA    subfunc 0x%16.16lx.%16.16lx",
175811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
175911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
176013209ad0SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KDSA   subfunc 0x%16.16lx.%16.16lx",
176113209ad0SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
176213209ad0SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
1763173aec2dSChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1764173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
1765173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
1766173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
1767173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
17684f45b90eSChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
17694f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
17704f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
17714f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
17724f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
177311ba5961SChristian Borntraeger 
17740a763c78SDavid Hildenbrand 	return 0;
17750a763c78SDavid Hildenbrand }
1776346fa2f8SChristian Borntraeger 
1777658b6edaSMichael Mueller static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1778658b6edaSMichael Mueller {
1779658b6edaSMichael Mueller 	int ret = -ENXIO;
1780658b6edaSMichael Mueller 
1781658b6edaSMichael Mueller 	switch (attr->attr) {
1782658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
1783658b6edaSMichael Mueller 		ret = kvm_s390_get_processor(kvm, attr);
1784658b6edaSMichael Mueller 		break;
1785658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MACHINE:
1786658b6edaSMichael Mueller 		ret = kvm_s390_get_machine(kvm, attr);
1787658b6edaSMichael Mueller 		break;
178815c9705fSDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
178915c9705fSDavid Hildenbrand 		ret = kvm_s390_get_processor_feat(kvm, attr);
179015c9705fSDavid Hildenbrand 		break;
179115c9705fSDavid Hildenbrand 	case KVM_S390_VM_CPU_MACHINE_FEAT:
179215c9705fSDavid Hildenbrand 		ret = kvm_s390_get_machine_feat(kvm, attr);
179315c9705fSDavid Hildenbrand 		break;
17940a763c78SDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
17950a763c78SDavid Hildenbrand 		ret = kvm_s390_get_processor_subfunc(kvm, attr);
17960a763c78SDavid Hildenbrand 		break;
17970a763c78SDavid Hildenbrand 	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
17980a763c78SDavid Hildenbrand 		ret = kvm_s390_get_machine_subfunc(kvm, attr);
17990a763c78SDavid Hildenbrand 		break;
1800658b6edaSMichael Mueller 	}
1801658b6edaSMichael Mueller 	return ret;
1802658b6edaSMichael Mueller }
1803658b6edaSMichael Mueller 
180424fe0195SPierre Morel /**
180524fe0195SPierre Morel  * kvm_s390_update_topology_change_report - update CPU topology change report
180624fe0195SPierre Morel  * @kvm: guest KVM description
180724fe0195SPierre Morel  * @val: set or clear the MTCR bit
180824fe0195SPierre Morel  *
180924fe0195SPierre Morel  * Updates the Multiprocessor Topology-Change-Report bit to signal
181024fe0195SPierre Morel  * the guest with a topology change.
181124fe0195SPierre Morel  * This is only relevant if the topology facility is present.
181224fe0195SPierre Morel  *
181324fe0195SPierre Morel  * The SCA version, bsca or esca, doesn't matter as offset is the same.
181424fe0195SPierre Morel  */
181524fe0195SPierre Morel static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val)
181624fe0195SPierre Morel {
181724fe0195SPierre Morel 	union sca_utility new, old;
181824fe0195SPierre Morel 	struct bsca_block *sca;
181924fe0195SPierre Morel 
182024fe0195SPierre Morel 	read_lock(&kvm->arch.sca_lock);
182124fe0195SPierre Morel 	sca = kvm->arch.sca;
182224fe0195SPierre Morel 	do {
182324fe0195SPierre Morel 		old = READ_ONCE(sca->utility);
182424fe0195SPierre Morel 		new = old;
182524fe0195SPierre Morel 		new.mtcr = val;
182624fe0195SPierre Morel 	} while (cmpxchg(&sca->utility.val, old.val, new.val) != old.val);
182724fe0195SPierre Morel 	read_unlock(&kvm->arch.sca_lock);
182824fe0195SPierre Morel }
182924fe0195SPierre Morel 
1830f5ecfee9SPierre Morel static int kvm_s390_set_topo_change_indication(struct kvm *kvm,
1831f5ecfee9SPierre Morel 					       struct kvm_device_attr *attr)
1832f5ecfee9SPierre Morel {
1833f5ecfee9SPierre Morel 	if (!test_kvm_facility(kvm, 11))
1834f5ecfee9SPierre Morel 		return -ENXIO;
1835f5ecfee9SPierre Morel 
1836f5ecfee9SPierre Morel 	kvm_s390_update_topology_change_report(kvm, !!attr->attr);
1837f5ecfee9SPierre Morel 	return 0;
1838f5ecfee9SPierre Morel }
1839f5ecfee9SPierre Morel 
1840f5ecfee9SPierre Morel static int kvm_s390_get_topo_change_indication(struct kvm *kvm,
1841f5ecfee9SPierre Morel 					       struct kvm_device_attr *attr)
1842f5ecfee9SPierre Morel {
1843f5ecfee9SPierre Morel 	u8 topo;
1844f5ecfee9SPierre Morel 
1845f5ecfee9SPierre Morel 	if (!test_kvm_facility(kvm, 11))
1846f5ecfee9SPierre Morel 		return -ENXIO;
1847f5ecfee9SPierre Morel 
1848f5ecfee9SPierre Morel 	read_lock(&kvm->arch.sca_lock);
1849f5ecfee9SPierre Morel 	topo = ((struct bsca_block *)kvm->arch.sca)->utility.mtcr;
1850f5ecfee9SPierre Morel 	read_unlock(&kvm->arch.sca_lock);
1851f5ecfee9SPierre Morel 
1852f5ecfee9SPierre Morel 	return put_user(topo, (u8 __user *)attr->addr);
1853f5ecfee9SPierre Morel }
1854f5ecfee9SPierre Morel 
1855f2061656SDominik Dingel static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1856f2061656SDominik Dingel {
1857f2061656SDominik Dingel 	int ret;
1858f2061656SDominik Dingel 
1859f2061656SDominik Dingel 	switch (attr->group) {
18604f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
18618c0a7ce6SDominik Dingel 		ret = kvm_s390_set_mem_control(kvm, attr);
18624f718eabSDominik Dingel 		break;
186372f25020SJason J. Herne 	case KVM_S390_VM_TOD:
186472f25020SJason J. Herne 		ret = kvm_s390_set_tod(kvm, attr);
186572f25020SJason J. Herne 		break;
1866658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
1867658b6edaSMichael Mueller 		ret = kvm_s390_set_cpu_model(kvm, attr);
1868658b6edaSMichael Mueller 		break;
1869a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO:
1870a374e892STony Krowiak 		ret = kvm_s390_vm_set_crypto(kvm, attr);
1871a374e892STony Krowiak 		break;
1872190df4a2SClaudio Imbrenda 	case KVM_S390_VM_MIGRATION:
1873190df4a2SClaudio Imbrenda 		ret = kvm_s390_vm_set_migration(kvm, attr);
1874190df4a2SClaudio Imbrenda 		break;
1875f5ecfee9SPierre Morel 	case KVM_S390_VM_CPU_TOPOLOGY:
1876f5ecfee9SPierre Morel 		ret = kvm_s390_set_topo_change_indication(kvm, attr);
1877f5ecfee9SPierre Morel 		break;
1878f2061656SDominik Dingel 	default:
1879f2061656SDominik Dingel 		ret = -ENXIO;
1880f2061656SDominik Dingel 		break;
1881f2061656SDominik Dingel 	}
1882f2061656SDominik Dingel 
1883f2061656SDominik Dingel 	return ret;
1884f2061656SDominik Dingel }
1885f2061656SDominik Dingel 
1886f2061656SDominik Dingel static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1887f2061656SDominik Dingel {
18888c0a7ce6SDominik Dingel 	int ret;
18898c0a7ce6SDominik Dingel 
18908c0a7ce6SDominik Dingel 	switch (attr->group) {
18918c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
18928c0a7ce6SDominik Dingel 		ret = kvm_s390_get_mem_control(kvm, attr);
18938c0a7ce6SDominik Dingel 		break;
189472f25020SJason J. Herne 	case KVM_S390_VM_TOD:
189572f25020SJason J. Herne 		ret = kvm_s390_get_tod(kvm, attr);
189672f25020SJason J. Herne 		break;
1897658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
1898658b6edaSMichael Mueller 		ret = kvm_s390_get_cpu_model(kvm, attr);
1899658b6edaSMichael Mueller 		break;
1900190df4a2SClaudio Imbrenda 	case KVM_S390_VM_MIGRATION:
1901190df4a2SClaudio Imbrenda 		ret = kvm_s390_vm_get_migration(kvm, attr);
1902190df4a2SClaudio Imbrenda 		break;
1903f5ecfee9SPierre Morel 	case KVM_S390_VM_CPU_TOPOLOGY:
1904f5ecfee9SPierre Morel 		ret = kvm_s390_get_topo_change_indication(kvm, attr);
1905f5ecfee9SPierre Morel 		break;
19068c0a7ce6SDominik Dingel 	default:
19078c0a7ce6SDominik Dingel 		ret = -ENXIO;
19088c0a7ce6SDominik Dingel 		break;
19098c0a7ce6SDominik Dingel 	}
19108c0a7ce6SDominik Dingel 
19118c0a7ce6SDominik Dingel 	return ret;
1912f2061656SDominik Dingel }
1913f2061656SDominik Dingel 
1914f2061656SDominik Dingel static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1915f2061656SDominik Dingel {
1916f2061656SDominik Dingel 	int ret;
1917f2061656SDominik Dingel 
1918f2061656SDominik Dingel 	switch (attr->group) {
19194f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
19204f718eabSDominik Dingel 		switch (attr->attr) {
19214f718eabSDominik Dingel 		case KVM_S390_VM_MEM_ENABLE_CMMA:
19224f718eabSDominik Dingel 		case KVM_S390_VM_MEM_CLR_CMMA:
1923f9cbd9b0SDavid Hildenbrand 			ret = sclp.has_cmma ? 0 : -ENXIO;
1924f9cbd9b0SDavid Hildenbrand 			break;
19258c0a7ce6SDominik Dingel 		case KVM_S390_VM_MEM_LIMIT_SIZE:
19264f718eabSDominik Dingel 			ret = 0;
19274f718eabSDominik Dingel 			break;
19284f718eabSDominik Dingel 		default:
19294f718eabSDominik Dingel 			ret = -ENXIO;
19304f718eabSDominik Dingel 			break;
19314f718eabSDominik Dingel 		}
19324f718eabSDominik Dingel 		break;
193372f25020SJason J. Herne 	case KVM_S390_VM_TOD:
193472f25020SJason J. Herne 		switch (attr->attr) {
193572f25020SJason J. Herne 		case KVM_S390_VM_TOD_LOW:
193672f25020SJason J. Herne 		case KVM_S390_VM_TOD_HIGH:
193772f25020SJason J. Herne 			ret = 0;
193872f25020SJason J. Herne 			break;
193972f25020SJason J. Herne 		default:
194072f25020SJason J. Herne 			ret = -ENXIO;
194172f25020SJason J. Herne 			break;
194272f25020SJason J. Herne 		}
194372f25020SJason J. Herne 		break;
1944658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
1945658b6edaSMichael Mueller 		switch (attr->attr) {
1946658b6edaSMichael Mueller 		case KVM_S390_VM_CPU_PROCESSOR:
1947658b6edaSMichael Mueller 		case KVM_S390_VM_CPU_MACHINE:
194815c9705fSDavid Hildenbrand 		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
194915c9705fSDavid Hildenbrand 		case KVM_S390_VM_CPU_MACHINE_FEAT:
19500a763c78SDavid Hildenbrand 		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1951346fa2f8SChristian Borntraeger 		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1952658b6edaSMichael Mueller 			ret = 0;
1953658b6edaSMichael Mueller 			break;
1954658b6edaSMichael Mueller 		default:
1955658b6edaSMichael Mueller 			ret = -ENXIO;
1956658b6edaSMichael Mueller 			break;
1957658b6edaSMichael Mueller 		}
1958658b6edaSMichael Mueller 		break;
1959a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO:
1960a374e892STony Krowiak 		switch (attr->attr) {
1961a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1962a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1963a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1964a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1965a374e892STony Krowiak 			ret = 0;
1966a374e892STony Krowiak 			break;
196737940fb0STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
196837940fb0STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
196937940fb0STony Krowiak 			ret = ap_instructions_available() ? 0 : -ENXIO;
197037940fb0STony Krowiak 			break;
1971a374e892STony Krowiak 		default:
1972a374e892STony Krowiak 			ret = -ENXIO;
1973a374e892STony Krowiak 			break;
1974a374e892STony Krowiak 		}
1975a374e892STony Krowiak 		break;
1976190df4a2SClaudio Imbrenda 	case KVM_S390_VM_MIGRATION:
1977190df4a2SClaudio Imbrenda 		ret = 0;
1978190df4a2SClaudio Imbrenda 		break;
1979f5ecfee9SPierre Morel 	case KVM_S390_VM_CPU_TOPOLOGY:
1980f5ecfee9SPierre Morel 		ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO;
1981f5ecfee9SPierre Morel 		break;
1982f2061656SDominik Dingel 	default:
1983f2061656SDominik Dingel 		ret = -ENXIO;
1984f2061656SDominik Dingel 		break;
1985f2061656SDominik Dingel 	}
1986f2061656SDominik Dingel 
1987f2061656SDominik Dingel 	return ret;
1988f2061656SDominik Dingel }
1989f2061656SDominik Dingel 
199030ee2a98SJason J. Herne static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
199130ee2a98SJason J. Herne {
199230ee2a98SJason J. Herne 	uint8_t *keys;
199330ee2a98SJason J. Herne 	uint64_t hva;
19944f899147SChristian Borntraeger 	int srcu_idx, i, r = 0;
199530ee2a98SJason J. Herne 
199630ee2a98SJason J. Herne 	if (args->flags != 0)
199730ee2a98SJason J. Herne 		return -EINVAL;
199830ee2a98SJason J. Herne 
199930ee2a98SJason J. Herne 	/* Is this guest using storage keys? */
200055531b74SJanosch Frank 	if (!mm_uses_skeys(current->mm))
200130ee2a98SJason J. Herne 		return KVM_S390_GET_SKEYS_NONE;
200230ee2a98SJason J. Herne 
200330ee2a98SJason J. Herne 	/* Enforce sane limit on memory allocation */
200430ee2a98SJason J. Herne 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
200530ee2a98SJason J. Herne 		return -EINVAL;
200630ee2a98SJason J. Herne 
2007c4196218SChristian Borntraeger 	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
200830ee2a98SJason J. Herne 	if (!keys)
200930ee2a98SJason J. Herne 		return -ENOMEM;
201030ee2a98SJason J. Herne 
2011d8ed45c5SMichel Lespinasse 	mmap_read_lock(current->mm);
20124f899147SChristian Borntraeger 	srcu_idx = srcu_read_lock(&kvm->srcu);
201330ee2a98SJason J. Herne 	for (i = 0; i < args->count; i++) {
201430ee2a98SJason J. Herne 		hva = gfn_to_hva(kvm, args->start_gfn + i);
201530ee2a98SJason J. Herne 		if (kvm_is_error_hva(hva)) {
201630ee2a98SJason J. Herne 			r = -EFAULT;
2017d3ed1ceeSMartin Schwidefsky 			break;
201830ee2a98SJason J. Herne 		}
201930ee2a98SJason J. Herne 
2020154c8c19SDavid Hildenbrand 		r = get_guest_storage_key(current->mm, hva, &keys[i]);
2021154c8c19SDavid Hildenbrand 		if (r)
2022d3ed1ceeSMartin Schwidefsky 			break;
202330ee2a98SJason J. Herne 	}
20244f899147SChristian Borntraeger 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2025d8ed45c5SMichel Lespinasse 	mmap_read_unlock(current->mm);
202630ee2a98SJason J. Herne 
2027d3ed1ceeSMartin Schwidefsky 	if (!r) {
202830ee2a98SJason J. Herne 		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
202930ee2a98SJason J. Herne 				 sizeof(uint8_t) * args->count);
203030ee2a98SJason J. Herne 		if (r)
203130ee2a98SJason J. Herne 			r = -EFAULT;
2032d3ed1ceeSMartin Schwidefsky 	}
2033d3ed1ceeSMartin Schwidefsky 
203430ee2a98SJason J. Herne 	kvfree(keys);
203530ee2a98SJason J. Herne 	return r;
203630ee2a98SJason J. Herne }
203730ee2a98SJason J. Herne 
203830ee2a98SJason J. Herne static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
203930ee2a98SJason J. Herne {
204030ee2a98SJason J. Herne 	uint8_t *keys;
204130ee2a98SJason J. Herne 	uint64_t hva;
20424f899147SChristian Borntraeger 	int srcu_idx, i, r = 0;
2043bd096f64SJanosch Frank 	bool unlocked;
204430ee2a98SJason J. Herne 
204530ee2a98SJason J. Herne 	if (args->flags != 0)
204630ee2a98SJason J. Herne 		return -EINVAL;
204730ee2a98SJason J. Herne 
204830ee2a98SJason J. Herne 	/* Enforce sane limit on memory allocation */
204930ee2a98SJason J. Herne 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
205030ee2a98SJason J. Herne 		return -EINVAL;
205130ee2a98SJason J. Herne 
2052c4196218SChristian Borntraeger 	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
205330ee2a98SJason J. Herne 	if (!keys)
205430ee2a98SJason J. Herne 		return -ENOMEM;
205530ee2a98SJason J. Herne 
205630ee2a98SJason J. Herne 	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
205730ee2a98SJason J. Herne 			   sizeof(uint8_t) * args->count);
205830ee2a98SJason J. Herne 	if (r) {
205930ee2a98SJason J. Herne 		r = -EFAULT;
206030ee2a98SJason J. Herne 		goto out;
206130ee2a98SJason J. Herne 	}
206230ee2a98SJason J. Herne 
206330ee2a98SJason J. Herne 	/* Enable storage key handling for the guest */
206414d4a425SDominik Dingel 	r = s390_enable_skey();
206514d4a425SDominik Dingel 	if (r)
206614d4a425SDominik Dingel 		goto out;
206730ee2a98SJason J. Herne 
2068bd096f64SJanosch Frank 	i = 0;
2069d8ed45c5SMichel Lespinasse 	mmap_read_lock(current->mm);
20704f899147SChristian Borntraeger 	srcu_idx = srcu_read_lock(&kvm->srcu);
2071bd096f64SJanosch Frank         while (i < args->count) {
2072bd096f64SJanosch Frank 		unlocked = false;
207330ee2a98SJason J. Herne 		hva = gfn_to_hva(kvm, args->start_gfn + i);
207430ee2a98SJason J. Herne 		if (kvm_is_error_hva(hva)) {
207530ee2a98SJason J. Herne 			r = -EFAULT;
2076d3ed1ceeSMartin Schwidefsky 			break;
207730ee2a98SJason J. Herne 		}
207830ee2a98SJason J. Herne 
207930ee2a98SJason J. Herne 		/* Lowest order bit is reserved */
208030ee2a98SJason J. Herne 		if (keys[i] & 0x01) {
208130ee2a98SJason J. Herne 			r = -EINVAL;
2082d3ed1ceeSMartin Schwidefsky 			break;
208330ee2a98SJason J. Herne 		}
208430ee2a98SJason J. Herne 
2085fe69eabfSDavid Hildenbrand 		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
2086bd096f64SJanosch Frank 		if (r) {
208764019a2eSPeter Xu 			r = fixup_user_fault(current->mm, hva,
2088bd096f64SJanosch Frank 					     FAULT_FLAG_WRITE, &unlocked);
208930ee2a98SJason J. Herne 			if (r)
2090d3ed1ceeSMartin Schwidefsky 				break;
209130ee2a98SJason J. Herne 		}
2092bd096f64SJanosch Frank 		if (!r)
2093bd096f64SJanosch Frank 			i++;
2094bd096f64SJanosch Frank 	}
20954f899147SChristian Borntraeger 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2096d8ed45c5SMichel Lespinasse 	mmap_read_unlock(current->mm);
209730ee2a98SJason J. Herne out:
209830ee2a98SJason J. Herne 	kvfree(keys);
209930ee2a98SJason J. Herne 	return r;
210030ee2a98SJason J. Herne }
210130ee2a98SJason J. Herne 
21024036e387SClaudio Imbrenda /*
21034036e387SClaudio Imbrenda  * Base address and length must be sent at the start of each block, therefore
21044036e387SClaudio Imbrenda  * it's cheaper to send some clean data, as long as it's less than the size of
21054036e387SClaudio Imbrenda  * two longs.
21064036e387SClaudio Imbrenda  */
21074036e387SClaudio Imbrenda #define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
21084036e387SClaudio Imbrenda /* for consistency */
21094036e387SClaudio Imbrenda #define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
21104036e387SClaudio Imbrenda 
2111afdad616SClaudio Imbrenda static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2112afdad616SClaudio Imbrenda 			      u8 *res, unsigned long bufsize)
2113afdad616SClaudio Imbrenda {
2114afdad616SClaudio Imbrenda 	unsigned long pgstev, hva, cur_gfn = args->start_gfn;
2115afdad616SClaudio Imbrenda 
2116afdad616SClaudio Imbrenda 	args->count = 0;
2117afdad616SClaudio Imbrenda 	while (args->count < bufsize) {
2118afdad616SClaudio Imbrenda 		hva = gfn_to_hva(kvm, cur_gfn);
2119afdad616SClaudio Imbrenda 		/*
2120afdad616SClaudio Imbrenda 		 * We return an error if the first value was invalid, but we
2121afdad616SClaudio Imbrenda 		 * return successfully if at least one value was copied.
2122afdad616SClaudio Imbrenda 		 */
2123afdad616SClaudio Imbrenda 		if (kvm_is_error_hva(hva))
2124afdad616SClaudio Imbrenda 			return args->count ? 0 : -EFAULT;
2125afdad616SClaudio Imbrenda 		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2126afdad616SClaudio Imbrenda 			pgstev = 0;
2127afdad616SClaudio Imbrenda 		res[args->count++] = (pgstev >> 24) & 0x43;
2128afdad616SClaudio Imbrenda 		cur_gfn++;
2129afdad616SClaudio Imbrenda 	}
2130afdad616SClaudio Imbrenda 
2131afdad616SClaudio Imbrenda 	return 0;
2132afdad616SClaudio Imbrenda }
2133afdad616SClaudio Imbrenda 
2134c928bfc2SMaciej S. Szmigiero static struct kvm_memory_slot *gfn_to_memslot_approx(struct kvm_memslots *slots,
2135c928bfc2SMaciej S. Szmigiero 						     gfn_t gfn)
2136c928bfc2SMaciej S. Szmigiero {
2137c928bfc2SMaciej S. Szmigiero 	return ____gfn_to_memslot(slots, gfn, true);
2138c928bfc2SMaciej S. Szmigiero }
2139c928bfc2SMaciej S. Szmigiero 
2140afdad616SClaudio Imbrenda static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
2141afdad616SClaudio Imbrenda 					      unsigned long cur_gfn)
2142afdad616SClaudio Imbrenda {
2143c928bfc2SMaciej S. Szmigiero 	struct kvm_memory_slot *ms = gfn_to_memslot_approx(slots, cur_gfn);
2144afdad616SClaudio Imbrenda 	unsigned long ofs = cur_gfn - ms->base_gfn;
2145a54d8066SMaciej S. Szmigiero 	struct rb_node *mnode = &ms->gfn_node[slots->node_idx];
2146afdad616SClaudio Imbrenda 
2147afdad616SClaudio Imbrenda 	if (ms->base_gfn + ms->npages <= cur_gfn) {
2148a54d8066SMaciej S. Szmigiero 		mnode = rb_next(mnode);
2149afdad616SClaudio Imbrenda 		/* If we are above the highest slot, wrap around */
2150a54d8066SMaciej S. Szmigiero 		if (!mnode)
2151a54d8066SMaciej S. Szmigiero 			mnode = rb_first(&slots->gfn_tree);
2152afdad616SClaudio Imbrenda 
2153a54d8066SMaciej S. Szmigiero 		ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
2154afdad616SClaudio Imbrenda 		ofs = 0;
2155afdad616SClaudio Imbrenda 	}
2156afdad616SClaudio Imbrenda 	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
2157a54d8066SMaciej S. Szmigiero 	while (ofs >= ms->npages && (mnode = rb_next(mnode))) {
2158a54d8066SMaciej S. Szmigiero 		ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
2159b5c7e7ecSYury Norov 		ofs = find_first_bit(kvm_second_dirty_bitmap(ms), ms->npages);
2160afdad616SClaudio Imbrenda 	}
2161afdad616SClaudio Imbrenda 	return ms->base_gfn + ofs;
2162afdad616SClaudio Imbrenda }
2163afdad616SClaudio Imbrenda 
2164afdad616SClaudio Imbrenda static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2165afdad616SClaudio Imbrenda 			     u8 *res, unsigned long bufsize)
2166afdad616SClaudio Imbrenda {
2167afdad616SClaudio Imbrenda 	unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
2168afdad616SClaudio Imbrenda 	struct kvm_memslots *slots = kvm_memslots(kvm);
2169afdad616SClaudio Imbrenda 	struct kvm_memory_slot *ms;
2170afdad616SClaudio Imbrenda 
2171a54d8066SMaciej S. Szmigiero 	if (unlikely(kvm_memslots_empty(slots)))
21720774a964SSean Christopherson 		return 0;
21730774a964SSean Christopherson 
2174afdad616SClaudio Imbrenda 	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
2175afdad616SClaudio Imbrenda 	ms = gfn_to_memslot(kvm, cur_gfn);
2176afdad616SClaudio Imbrenda 	args->count = 0;
2177afdad616SClaudio Imbrenda 	args->start_gfn = cur_gfn;
2178afdad616SClaudio Imbrenda 	if (!ms)
2179afdad616SClaudio Imbrenda 		return 0;
2180afdad616SClaudio Imbrenda 	next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
21816a656832SMaciej S. Szmigiero 	mem_end = kvm_s390_get_gfn_end(slots);
2182afdad616SClaudio Imbrenda 
2183afdad616SClaudio Imbrenda 	while (args->count < bufsize) {
2184afdad616SClaudio Imbrenda 		hva = gfn_to_hva(kvm, cur_gfn);
2185afdad616SClaudio Imbrenda 		if (kvm_is_error_hva(hva))
2186afdad616SClaudio Imbrenda 			return 0;
2187afdad616SClaudio Imbrenda 		/* Decrement only if we actually flipped the bit to 0 */
2188afdad616SClaudio Imbrenda 		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
2189afdad616SClaudio Imbrenda 			atomic64_dec(&kvm->arch.cmma_dirty_pages);
2190afdad616SClaudio Imbrenda 		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2191afdad616SClaudio Imbrenda 			pgstev = 0;
2192afdad616SClaudio Imbrenda 		/* Save the value */
2193afdad616SClaudio Imbrenda 		res[args->count++] = (pgstev >> 24) & 0x43;
2194afdad616SClaudio Imbrenda 		/* If the next bit is too far away, stop. */
2195afdad616SClaudio Imbrenda 		if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
2196afdad616SClaudio Imbrenda 			return 0;
2197afdad616SClaudio Imbrenda 		/* If we reached the previous "next", find the next one */
2198afdad616SClaudio Imbrenda 		if (cur_gfn == next_gfn)
2199afdad616SClaudio Imbrenda 			next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2200afdad616SClaudio Imbrenda 		/* Reached the end of memory or of the buffer, stop */
2201afdad616SClaudio Imbrenda 		if ((next_gfn >= mem_end) ||
2202afdad616SClaudio Imbrenda 		    (next_gfn - args->start_gfn >= bufsize))
2203afdad616SClaudio Imbrenda 			return 0;
2204afdad616SClaudio Imbrenda 		cur_gfn++;
2205afdad616SClaudio Imbrenda 		/* Reached the end of the current memslot, take the next one. */
2206afdad616SClaudio Imbrenda 		if (cur_gfn - ms->base_gfn >= ms->npages) {
2207afdad616SClaudio Imbrenda 			ms = gfn_to_memslot(kvm, cur_gfn);
2208afdad616SClaudio Imbrenda 			if (!ms)
2209afdad616SClaudio Imbrenda 				return 0;
2210afdad616SClaudio Imbrenda 		}
2211afdad616SClaudio Imbrenda 	}
2212afdad616SClaudio Imbrenda 	return 0;
2213afdad616SClaudio Imbrenda }
2214afdad616SClaudio Imbrenda 
2215afdad616SClaudio Imbrenda /*
22164036e387SClaudio Imbrenda  * This function searches for the next page with dirty CMMA attributes, and
22174036e387SClaudio Imbrenda  * saves the attributes in the buffer up to either the end of the buffer or
22184036e387SClaudio Imbrenda  * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
22194036e387SClaudio Imbrenda  * no trailing clean bytes are saved.
22204036e387SClaudio Imbrenda  * In case no dirty bits were found, or if CMMA was not enabled or used, the
22214036e387SClaudio Imbrenda  * output buffer will indicate 0 as length.
22224036e387SClaudio Imbrenda  */
22234036e387SClaudio Imbrenda static int kvm_s390_get_cmma_bits(struct kvm *kvm,
22244036e387SClaudio Imbrenda 				  struct kvm_s390_cmma_log *args)
22254036e387SClaudio Imbrenda {
2226afdad616SClaudio Imbrenda 	unsigned long bufsize;
2227afdad616SClaudio Imbrenda 	int srcu_idx, peek, ret;
2228afdad616SClaudio Imbrenda 	u8 *values;
22294036e387SClaudio Imbrenda 
2230afdad616SClaudio Imbrenda 	if (!kvm->arch.use_cmma)
22314036e387SClaudio Imbrenda 		return -ENXIO;
22324036e387SClaudio Imbrenda 	/* Invalid/unsupported flags were specified */
22334036e387SClaudio Imbrenda 	if (args->flags & ~KVM_S390_CMMA_PEEK)
22344036e387SClaudio Imbrenda 		return -EINVAL;
22354036e387SClaudio Imbrenda 	/* Migration mode query, and we are not doing a migration */
22364036e387SClaudio Imbrenda 	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
2237afdad616SClaudio Imbrenda 	if (!peek && !kvm->arch.migration_mode)
22384036e387SClaudio Imbrenda 		return -EINVAL;
22394036e387SClaudio Imbrenda 	/* CMMA is disabled or was not used, or the buffer has length zero */
22404036e387SClaudio Imbrenda 	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
2241c9f0a2b8SJanosch Frank 	if (!bufsize || !kvm->mm->context.uses_cmm) {
22424036e387SClaudio Imbrenda 		memset(args, 0, sizeof(*args));
22434036e387SClaudio Imbrenda 		return 0;
22444036e387SClaudio Imbrenda 	}
22454036e387SClaudio Imbrenda 	/* We are not peeking, and there are no dirty pages */
2246afdad616SClaudio Imbrenda 	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
22474036e387SClaudio Imbrenda 		memset(args, 0, sizeof(*args));
22484036e387SClaudio Imbrenda 		return 0;
22494036e387SClaudio Imbrenda 	}
22504036e387SClaudio Imbrenda 
2251afdad616SClaudio Imbrenda 	values = vmalloc(bufsize);
2252afdad616SClaudio Imbrenda 	if (!values)
22534036e387SClaudio Imbrenda 		return -ENOMEM;
22544036e387SClaudio Imbrenda 
2255d8ed45c5SMichel Lespinasse 	mmap_read_lock(kvm->mm);
22564036e387SClaudio Imbrenda 	srcu_idx = srcu_read_lock(&kvm->srcu);
2257afdad616SClaudio Imbrenda 	if (peek)
2258afdad616SClaudio Imbrenda 		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
2259afdad616SClaudio Imbrenda 	else
2260afdad616SClaudio Imbrenda 		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
22614036e387SClaudio Imbrenda 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2262d8ed45c5SMichel Lespinasse 	mmap_read_unlock(kvm->mm);
22634036e387SClaudio Imbrenda 
2264afdad616SClaudio Imbrenda 	if (kvm->arch.migration_mode)
2265afdad616SClaudio Imbrenda 		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2266afdad616SClaudio Imbrenda 	else
2267afdad616SClaudio Imbrenda 		args->remaining = 0;
22684036e387SClaudio Imbrenda 
2269afdad616SClaudio Imbrenda 	if (copy_to_user((void __user *)args->values, values, args->count))
2270afdad616SClaudio Imbrenda 		ret = -EFAULT;
2271afdad616SClaudio Imbrenda 
2272afdad616SClaudio Imbrenda 	vfree(values);
2273afdad616SClaudio Imbrenda 	return ret;
22744036e387SClaudio Imbrenda }
22754036e387SClaudio Imbrenda 
22764036e387SClaudio Imbrenda /*
22774036e387SClaudio Imbrenda  * This function sets the CMMA attributes for the given pages. If the input
22784036e387SClaudio Imbrenda  * buffer has zero length, no action is taken, otherwise the attributes are
2279c9f0a2b8SJanosch Frank  * set and the mm->context.uses_cmm flag is set.
22804036e387SClaudio Imbrenda  */
22814036e387SClaudio Imbrenda static int kvm_s390_set_cmma_bits(struct kvm *kvm,
22824036e387SClaudio Imbrenda 				  const struct kvm_s390_cmma_log *args)
22834036e387SClaudio Imbrenda {
22844036e387SClaudio Imbrenda 	unsigned long hva, mask, pgstev, i;
22854036e387SClaudio Imbrenda 	uint8_t *bits;
22864036e387SClaudio Imbrenda 	int srcu_idx, r = 0;
22874036e387SClaudio Imbrenda 
22884036e387SClaudio Imbrenda 	mask = args->mask;
22894036e387SClaudio Imbrenda 
22904036e387SClaudio Imbrenda 	if (!kvm->arch.use_cmma)
22914036e387SClaudio Imbrenda 		return -ENXIO;
22924036e387SClaudio Imbrenda 	/* invalid/unsupported flags */
22934036e387SClaudio Imbrenda 	if (args->flags != 0)
22944036e387SClaudio Imbrenda 		return -EINVAL;
22954036e387SClaudio Imbrenda 	/* Enforce sane limit on memory allocation */
22964036e387SClaudio Imbrenda 	if (args->count > KVM_S390_CMMA_SIZE_MAX)
22974036e387SClaudio Imbrenda 		return -EINVAL;
22984036e387SClaudio Imbrenda 	/* Nothing to do */
22994036e387SClaudio Imbrenda 	if (args->count == 0)
23004036e387SClaudio Imbrenda 		return 0;
23014036e387SClaudio Imbrenda 
230242bc47b3SKees Cook 	bits = vmalloc(array_size(sizeof(*bits), args->count));
23034036e387SClaudio Imbrenda 	if (!bits)
23044036e387SClaudio Imbrenda 		return -ENOMEM;
23054036e387SClaudio Imbrenda 
23064036e387SClaudio Imbrenda 	r = copy_from_user(bits, (void __user *)args->values, args->count);
23074036e387SClaudio Imbrenda 	if (r) {
23084036e387SClaudio Imbrenda 		r = -EFAULT;
23094036e387SClaudio Imbrenda 		goto out;
23104036e387SClaudio Imbrenda 	}
23114036e387SClaudio Imbrenda 
2312d8ed45c5SMichel Lespinasse 	mmap_read_lock(kvm->mm);
23134036e387SClaudio Imbrenda 	srcu_idx = srcu_read_lock(&kvm->srcu);
23144036e387SClaudio Imbrenda 	for (i = 0; i < args->count; i++) {
23154036e387SClaudio Imbrenda 		hva = gfn_to_hva(kvm, args->start_gfn + i);
23164036e387SClaudio Imbrenda 		if (kvm_is_error_hva(hva)) {
23174036e387SClaudio Imbrenda 			r = -EFAULT;
23184036e387SClaudio Imbrenda 			break;
23194036e387SClaudio Imbrenda 		}
23204036e387SClaudio Imbrenda 
23214036e387SClaudio Imbrenda 		pgstev = bits[i];
23224036e387SClaudio Imbrenda 		pgstev = pgstev << 24;
23231bab1c02SClaudio Imbrenda 		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
23244036e387SClaudio Imbrenda 		set_pgste_bits(kvm->mm, hva, mask, pgstev);
23254036e387SClaudio Imbrenda 	}
23264036e387SClaudio Imbrenda 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2327d8ed45c5SMichel Lespinasse 	mmap_read_unlock(kvm->mm);
23284036e387SClaudio Imbrenda 
2329c9f0a2b8SJanosch Frank 	if (!kvm->mm->context.uses_cmm) {
2330d8ed45c5SMichel Lespinasse 		mmap_write_lock(kvm->mm);
2331c9f0a2b8SJanosch Frank 		kvm->mm->context.uses_cmm = 1;
2332d8ed45c5SMichel Lespinasse 		mmap_write_unlock(kvm->mm);
23334036e387SClaudio Imbrenda 	}
23344036e387SClaudio Imbrenda out:
23354036e387SClaudio Imbrenda 	vfree(bits);
23364036e387SClaudio Imbrenda 	return r;
23374036e387SClaudio Imbrenda }
23384036e387SClaudio Imbrenda 
2339be48d86fSClaudio Imbrenda /**
2340be48d86fSClaudio Imbrenda  * kvm_s390_cpus_from_pv - Convert all protected vCPUs in a protected VM to
2341be48d86fSClaudio Imbrenda  * non protected.
2342be48d86fSClaudio Imbrenda  * @kvm: the VM whose protected vCPUs are to be converted
2343be48d86fSClaudio Imbrenda  * @rc: return value for the RC field of the UVC (in case of error)
2344be48d86fSClaudio Imbrenda  * @rrc: return value for the RRC field of the UVC (in case of error)
2345be48d86fSClaudio Imbrenda  *
2346be48d86fSClaudio Imbrenda  * Does not stop in case of error, tries to convert as many
2347be48d86fSClaudio Imbrenda  * CPUs as possible. In case of error, the RC and RRC of the last error are
2348be48d86fSClaudio Imbrenda  * returned.
2349be48d86fSClaudio Imbrenda  *
2350be48d86fSClaudio Imbrenda  * Return: 0 in case of success, otherwise -EIO
2351be48d86fSClaudio Imbrenda  */
2352be48d86fSClaudio Imbrenda int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
235329b40f10SJanosch Frank {
235429b40f10SJanosch Frank 	struct kvm_vcpu *vcpu;
235546808a4cSMarc Zyngier 	unsigned long i;
2356be48d86fSClaudio Imbrenda 	u16 _rc, _rrc;
2357be48d86fSClaudio Imbrenda 	int ret = 0;
235829b40f10SJanosch Frank 
235929b40f10SJanosch Frank 	/*
236029b40f10SJanosch Frank 	 * We ignore failures and try to destroy as many CPUs as possible.
236129b40f10SJanosch Frank 	 * At the same time we must not free the assigned resources when
236229b40f10SJanosch Frank 	 * this fails, as the ultravisor has still access to that memory.
236329b40f10SJanosch Frank 	 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
236429b40f10SJanosch Frank 	 * behind.
236529b40f10SJanosch Frank 	 * We want to return the first failure rc and rrc, though.
236629b40f10SJanosch Frank 	 */
236729b40f10SJanosch Frank 	kvm_for_each_vcpu(i, vcpu, kvm) {
236829b40f10SJanosch Frank 		mutex_lock(&vcpu->mutex);
2369be48d86fSClaudio Imbrenda 		if (kvm_s390_pv_destroy_cpu(vcpu, &_rc, &_rrc) && !ret) {
2370be48d86fSClaudio Imbrenda 			*rc = _rc;
2371be48d86fSClaudio Imbrenda 			*rrc = _rrc;
237229b40f10SJanosch Frank 			ret = -EIO;
237329b40f10SJanosch Frank 		}
237429b40f10SJanosch Frank 		mutex_unlock(&vcpu->mutex);
237529b40f10SJanosch Frank 	}
2376ee6a569dSMichael Mueller 	/* Ensure that we re-enable gisa if the non-PV guest used it but the PV guest did not. */
2377ee6a569dSMichael Mueller 	if (use_gisa)
2378ee6a569dSMichael Mueller 		kvm_s390_gisa_enable(kvm);
237929b40f10SJanosch Frank 	return ret;
238029b40f10SJanosch Frank }
238129b40f10SJanosch Frank 
2382be48d86fSClaudio Imbrenda /**
2383be48d86fSClaudio Imbrenda  * kvm_s390_cpus_to_pv - Convert all non-protected vCPUs in a protected VM
2384be48d86fSClaudio Imbrenda  * to protected.
2385be48d86fSClaudio Imbrenda  * @kvm: the VM whose protected vCPUs are to be converted
2386be48d86fSClaudio Imbrenda  * @rc: return value for the RC field of the UVC (in case of error)
2387be48d86fSClaudio Imbrenda  * @rrc: return value for the RRC field of the UVC (in case of error)
2388be48d86fSClaudio Imbrenda  *
2389be48d86fSClaudio Imbrenda  * Tries to undo the conversion in case of error.
2390be48d86fSClaudio Imbrenda  *
2391be48d86fSClaudio Imbrenda  * Return: 0 in case of success, otherwise -EIO
2392be48d86fSClaudio Imbrenda  */
239329b40f10SJanosch Frank static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
239429b40f10SJanosch Frank {
239546808a4cSMarc Zyngier 	unsigned long i;
239646808a4cSMarc Zyngier 	int r = 0;
239729b40f10SJanosch Frank 	u16 dummy;
239829b40f10SJanosch Frank 
239929b40f10SJanosch Frank 	struct kvm_vcpu *vcpu;
240029b40f10SJanosch Frank 
2401ee6a569dSMichael Mueller 	/* Disable the GISA if the ultravisor does not support AIV. */
2402ee6a569dSMichael Mueller 	if (!test_bit_inv(BIT_UV_FEAT_AIV, &uv_info.uv_feature_indications))
2403ee6a569dSMichael Mueller 		kvm_s390_gisa_disable(kvm);
2404ee6a569dSMichael Mueller 
240529b40f10SJanosch Frank 	kvm_for_each_vcpu(i, vcpu, kvm) {
240629b40f10SJanosch Frank 		mutex_lock(&vcpu->mutex);
240729b40f10SJanosch Frank 		r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
240829b40f10SJanosch Frank 		mutex_unlock(&vcpu->mutex);
240929b40f10SJanosch Frank 		if (r)
241029b40f10SJanosch Frank 			break;
241129b40f10SJanosch Frank 	}
241229b40f10SJanosch Frank 	if (r)
241329b40f10SJanosch Frank 		kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
241429b40f10SJanosch Frank 	return r;
241529b40f10SJanosch Frank }
241629b40f10SJanosch Frank 
241735d02493SJanosch Frank /*
241835d02493SJanosch Frank  * Here we provide user space with a direct interface to query UV
241935d02493SJanosch Frank  * related data like UV maxima and available features as well as
242035d02493SJanosch Frank  * feature specific data.
242135d02493SJanosch Frank  *
242235d02493SJanosch Frank  * To facilitate future extension of the data structures we'll try to
242335d02493SJanosch Frank  * write data up to the maximum requested length.
242435d02493SJanosch Frank  */
242535d02493SJanosch Frank static ssize_t kvm_s390_handle_pv_info(struct kvm_s390_pv_info *info)
242635d02493SJanosch Frank {
242735d02493SJanosch Frank 	ssize_t len_min;
242835d02493SJanosch Frank 
242935d02493SJanosch Frank 	switch (info->header.id) {
243035d02493SJanosch Frank 	case KVM_PV_INFO_VM: {
243135d02493SJanosch Frank 		len_min =  sizeof(info->header) + sizeof(info->vm);
243235d02493SJanosch Frank 
243335d02493SJanosch Frank 		if (info->header.len_max < len_min)
243435d02493SJanosch Frank 			return -EINVAL;
243535d02493SJanosch Frank 
243635d02493SJanosch Frank 		memcpy(info->vm.inst_calls_list,
243735d02493SJanosch Frank 		       uv_info.inst_calls_list,
243835d02493SJanosch Frank 		       sizeof(uv_info.inst_calls_list));
243935d02493SJanosch Frank 
244035d02493SJanosch Frank 		/* It's max cpuid not max cpus, so it's off by one */
244135d02493SJanosch Frank 		info->vm.max_cpus = uv_info.max_guest_cpu_id + 1;
244235d02493SJanosch Frank 		info->vm.max_guests = uv_info.max_num_sec_conf;
244335d02493SJanosch Frank 		info->vm.max_guest_addr = uv_info.max_sec_stor_addr;
244435d02493SJanosch Frank 		info->vm.feature_indication = uv_info.uv_feature_indications;
244535d02493SJanosch Frank 
244635d02493SJanosch Frank 		return len_min;
244735d02493SJanosch Frank 	}
2448fe9a93e0SJanosch Frank 	case KVM_PV_INFO_DUMP: {
2449fe9a93e0SJanosch Frank 		len_min =  sizeof(info->header) + sizeof(info->dump);
2450fe9a93e0SJanosch Frank 
2451fe9a93e0SJanosch Frank 		if (info->header.len_max < len_min)
2452fe9a93e0SJanosch Frank 			return -EINVAL;
2453fe9a93e0SJanosch Frank 
2454fe9a93e0SJanosch Frank 		info->dump.dump_cpu_buffer_len = uv_info.guest_cpu_stor_len;
2455fe9a93e0SJanosch Frank 		info->dump.dump_config_mem_buffer_per_1m = uv_info.conf_dump_storage_state_len;
2456fe9a93e0SJanosch Frank 		info->dump.dump_config_finalize_len = uv_info.conf_dump_finalize_len;
2457fe9a93e0SJanosch Frank 		return len_min;
2458fe9a93e0SJanosch Frank 	}
245935d02493SJanosch Frank 	default:
246035d02493SJanosch Frank 		return -EINVAL;
246135d02493SJanosch Frank 	}
246235d02493SJanosch Frank }
246335d02493SJanosch Frank 
24640460eb35SJanosch Frank static int kvm_s390_pv_dmp(struct kvm *kvm, struct kvm_pv_cmd *cmd,
24650460eb35SJanosch Frank 			   struct kvm_s390_pv_dmp dmp)
24660460eb35SJanosch Frank {
24670460eb35SJanosch Frank 	int r = -EINVAL;
24680460eb35SJanosch Frank 	void __user *result_buff = (void __user *)dmp.buff_addr;
24690460eb35SJanosch Frank 
24700460eb35SJanosch Frank 	switch (dmp.subcmd) {
24710460eb35SJanosch Frank 	case KVM_PV_DUMP_INIT: {
24720460eb35SJanosch Frank 		if (kvm->arch.pv.dumping)
24730460eb35SJanosch Frank 			break;
24740460eb35SJanosch Frank 
24750460eb35SJanosch Frank 		/*
24760460eb35SJanosch Frank 		 * Block SIE entry as concurrent dump UVCs could lead
24770460eb35SJanosch Frank 		 * to validities.
24780460eb35SJanosch Frank 		 */
24790460eb35SJanosch Frank 		kvm_s390_vcpu_block_all(kvm);
24800460eb35SJanosch Frank 
24810460eb35SJanosch Frank 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
24820460eb35SJanosch Frank 				  UVC_CMD_DUMP_INIT, &cmd->rc, &cmd->rrc);
24830460eb35SJanosch Frank 		KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP INIT: rc %x rrc %x",
24840460eb35SJanosch Frank 			     cmd->rc, cmd->rrc);
24850460eb35SJanosch Frank 		if (!r) {
24860460eb35SJanosch Frank 			kvm->arch.pv.dumping = true;
24870460eb35SJanosch Frank 		} else {
24880460eb35SJanosch Frank 			kvm_s390_vcpu_unblock_all(kvm);
24890460eb35SJanosch Frank 			r = -EINVAL;
24900460eb35SJanosch Frank 		}
24910460eb35SJanosch Frank 		break;
24920460eb35SJanosch Frank 	}
24930460eb35SJanosch Frank 	case KVM_PV_DUMP_CONFIG_STOR_STATE: {
24940460eb35SJanosch Frank 		if (!kvm->arch.pv.dumping)
24950460eb35SJanosch Frank 			break;
24960460eb35SJanosch Frank 
24970460eb35SJanosch Frank 		/*
24980460eb35SJanosch Frank 		 * gaddr is an output parameter since we might stop
24990460eb35SJanosch Frank 		 * early. As dmp will be copied back in our caller, we
25000460eb35SJanosch Frank 		 * don't need to do it ourselves.
25010460eb35SJanosch Frank 		 */
25020460eb35SJanosch Frank 		r = kvm_s390_pv_dump_stor_state(kvm, result_buff, &dmp.gaddr, dmp.buff_len,
25030460eb35SJanosch Frank 						&cmd->rc, &cmd->rrc);
25040460eb35SJanosch Frank 		break;
25050460eb35SJanosch Frank 	}
25060460eb35SJanosch Frank 	case KVM_PV_DUMP_COMPLETE: {
25070460eb35SJanosch Frank 		if (!kvm->arch.pv.dumping)
25080460eb35SJanosch Frank 			break;
25090460eb35SJanosch Frank 
25100460eb35SJanosch Frank 		r = -EINVAL;
25110460eb35SJanosch Frank 		if (dmp.buff_len < uv_info.conf_dump_finalize_len)
25120460eb35SJanosch Frank 			break;
25130460eb35SJanosch Frank 
25140460eb35SJanosch Frank 		r = kvm_s390_pv_dump_complete(kvm, result_buff,
25150460eb35SJanosch Frank 					      &cmd->rc, &cmd->rrc);
25160460eb35SJanosch Frank 		break;
25170460eb35SJanosch Frank 	}
25180460eb35SJanosch Frank 	default:
25190460eb35SJanosch Frank 		r = -ENOTTY;
25200460eb35SJanosch Frank 		break;
25210460eb35SJanosch Frank 	}
25220460eb35SJanosch Frank 
25230460eb35SJanosch Frank 	return r;
25240460eb35SJanosch Frank }
25250460eb35SJanosch Frank 
252629b40f10SJanosch Frank static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
252729b40f10SJanosch Frank {
2528fb491d55SClaudio Imbrenda 	const bool need_lock = (cmd->cmd != KVM_PV_ASYNC_CLEANUP_PERFORM);
2529fb491d55SClaudio Imbrenda 	void __user *argp = (void __user *)cmd->data;
253029b40f10SJanosch Frank 	int r = 0;
253129b40f10SJanosch Frank 	u16 dummy;
2532fb491d55SClaudio Imbrenda 
2533fb491d55SClaudio Imbrenda 	if (need_lock)
2534fb491d55SClaudio Imbrenda 		mutex_lock(&kvm->lock);
253529b40f10SJanosch Frank 
253629b40f10SJanosch Frank 	switch (cmd->cmd) {
253729b40f10SJanosch Frank 	case KVM_PV_ENABLE: {
253829b40f10SJanosch Frank 		r = -EINVAL;
253929b40f10SJanosch Frank 		if (kvm_s390_pv_is_protected(kvm))
254029b40f10SJanosch Frank 			break;
254129b40f10SJanosch Frank 
254229b40f10SJanosch Frank 		/*
254329b40f10SJanosch Frank 		 *  FMT 4 SIE needs esca. As we never switch back to bsca from
254429b40f10SJanosch Frank 		 *  esca, we need no cleanup in the error cases below
254529b40f10SJanosch Frank 		 */
254629b40f10SJanosch Frank 		r = sca_switch_to_extended(kvm);
254729b40f10SJanosch Frank 		if (r)
254829b40f10SJanosch Frank 			break;
254929b40f10SJanosch Frank 
2550d8ed45c5SMichel Lespinasse 		mmap_write_lock(current->mm);
2551fa0c5eabSJanosch Frank 		r = gmap_mark_unmergeable();
2552d8ed45c5SMichel Lespinasse 		mmap_write_unlock(current->mm);
2553fa0c5eabSJanosch Frank 		if (r)
2554fa0c5eabSJanosch Frank 			break;
2555fa0c5eabSJanosch Frank 
255629b40f10SJanosch Frank 		r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
255729b40f10SJanosch Frank 		if (r)
255829b40f10SJanosch Frank 			break;
255929b40f10SJanosch Frank 
256029b40f10SJanosch Frank 		r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
256129b40f10SJanosch Frank 		if (r)
256229b40f10SJanosch Frank 			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
25630890ddeaSChristian Borntraeger 
25640890ddeaSChristian Borntraeger 		/* we need to block service interrupts from now on */
25650890ddeaSChristian Borntraeger 		set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
256629b40f10SJanosch Frank 		break;
256729b40f10SJanosch Frank 	}
2568fb491d55SClaudio Imbrenda 	case KVM_PV_ASYNC_CLEANUP_PREPARE:
2569fb491d55SClaudio Imbrenda 		r = -EINVAL;
2570fb491d55SClaudio Imbrenda 		if (!kvm_s390_pv_is_protected(kvm) || !async_destroy)
2571fb491d55SClaudio Imbrenda 			break;
2572fb491d55SClaudio Imbrenda 
2573fb491d55SClaudio Imbrenda 		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
2574fb491d55SClaudio Imbrenda 		/*
2575fb491d55SClaudio Imbrenda 		 * If a CPU could not be destroyed, destroy VM will also fail.
2576fb491d55SClaudio Imbrenda 		 * There is no point in trying to destroy it. Instead return
2577fb491d55SClaudio Imbrenda 		 * the rc and rrc from the first CPU that failed destroying.
2578fb491d55SClaudio Imbrenda 		 */
2579fb491d55SClaudio Imbrenda 		if (r)
2580fb491d55SClaudio Imbrenda 			break;
2581fb491d55SClaudio Imbrenda 		r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc);
2582fb491d55SClaudio Imbrenda 
2583fb491d55SClaudio Imbrenda 		/* no need to block service interrupts any more */
2584fb491d55SClaudio Imbrenda 		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2585fb491d55SClaudio Imbrenda 		break;
2586fb491d55SClaudio Imbrenda 	case KVM_PV_ASYNC_CLEANUP_PERFORM:
2587fb491d55SClaudio Imbrenda 		r = -EINVAL;
2588fb491d55SClaudio Imbrenda 		if (!async_destroy)
2589fb491d55SClaudio Imbrenda 			break;
2590fb491d55SClaudio Imbrenda 		/* kvm->lock must not be held; this is asserted inside the function. */
2591fb491d55SClaudio Imbrenda 		r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc);
2592fb491d55SClaudio Imbrenda 		break;
259329b40f10SJanosch Frank 	case KVM_PV_DISABLE: {
259429b40f10SJanosch Frank 		r = -EINVAL;
259529b40f10SJanosch Frank 		if (!kvm_s390_pv_is_protected(kvm))
259629b40f10SJanosch Frank 			break;
259729b40f10SJanosch Frank 
259829b40f10SJanosch Frank 		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
259929b40f10SJanosch Frank 		/*
260029b40f10SJanosch Frank 		 * If a CPU could not be destroyed, destroy VM will also fail.
260129b40f10SJanosch Frank 		 * There is no point in trying to destroy it. Instead return
260229b40f10SJanosch Frank 		 * the rc and rrc from the first CPU that failed destroying.
260329b40f10SJanosch Frank 		 */
260429b40f10SJanosch Frank 		if (r)
260529b40f10SJanosch Frank 			break;
2606fb491d55SClaudio Imbrenda 		r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc);
26070890ddeaSChristian Borntraeger 
26080890ddeaSChristian Borntraeger 		/* no need to block service interrupts any more */
26090890ddeaSChristian Borntraeger 		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
261029b40f10SJanosch Frank 		break;
261129b40f10SJanosch Frank 	}
261229b40f10SJanosch Frank 	case KVM_PV_SET_SEC_PARMS: {
261329b40f10SJanosch Frank 		struct kvm_s390_pv_sec_parm parms = {};
261429b40f10SJanosch Frank 		void *hdr;
261529b40f10SJanosch Frank 
261629b40f10SJanosch Frank 		r = -EINVAL;
261729b40f10SJanosch Frank 		if (!kvm_s390_pv_is_protected(kvm))
261829b40f10SJanosch Frank 			break;
261929b40f10SJanosch Frank 
262029b40f10SJanosch Frank 		r = -EFAULT;
262129b40f10SJanosch Frank 		if (copy_from_user(&parms, argp, sizeof(parms)))
262229b40f10SJanosch Frank 			break;
262329b40f10SJanosch Frank 
262429b40f10SJanosch Frank 		/* Currently restricted to 8KB */
262529b40f10SJanosch Frank 		r = -EINVAL;
262629b40f10SJanosch Frank 		if (parms.length > PAGE_SIZE * 2)
262729b40f10SJanosch Frank 			break;
262829b40f10SJanosch Frank 
262929b40f10SJanosch Frank 		r = -ENOMEM;
263029b40f10SJanosch Frank 		hdr = vmalloc(parms.length);
263129b40f10SJanosch Frank 		if (!hdr)
263229b40f10SJanosch Frank 			break;
263329b40f10SJanosch Frank 
263429b40f10SJanosch Frank 		r = -EFAULT;
263529b40f10SJanosch Frank 		if (!copy_from_user(hdr, (void __user *)parms.origin,
263629b40f10SJanosch Frank 				    parms.length))
263729b40f10SJanosch Frank 			r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
263829b40f10SJanosch Frank 						      &cmd->rc, &cmd->rrc);
263929b40f10SJanosch Frank 
264029b40f10SJanosch Frank 		vfree(hdr);
264129b40f10SJanosch Frank 		break;
264229b40f10SJanosch Frank 	}
264329b40f10SJanosch Frank 	case KVM_PV_UNPACK: {
264429b40f10SJanosch Frank 		struct kvm_s390_pv_unp unp = {};
264529b40f10SJanosch Frank 
264629b40f10SJanosch Frank 		r = -EINVAL;
26471ed576a2SJanosch Frank 		if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
264829b40f10SJanosch Frank 			break;
264929b40f10SJanosch Frank 
265029b40f10SJanosch Frank 		r = -EFAULT;
265129b40f10SJanosch Frank 		if (copy_from_user(&unp, argp, sizeof(unp)))
265229b40f10SJanosch Frank 			break;
265329b40f10SJanosch Frank 
265429b40f10SJanosch Frank 		r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
265529b40f10SJanosch Frank 				       &cmd->rc, &cmd->rrc);
265629b40f10SJanosch Frank 		break;
265729b40f10SJanosch Frank 	}
265829b40f10SJanosch Frank 	case KVM_PV_VERIFY: {
265929b40f10SJanosch Frank 		r = -EINVAL;
266029b40f10SJanosch Frank 		if (!kvm_s390_pv_is_protected(kvm))
266129b40f10SJanosch Frank 			break;
266229b40f10SJanosch Frank 
266329b40f10SJanosch Frank 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
266429b40f10SJanosch Frank 				  UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
266529b40f10SJanosch Frank 		KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
266629b40f10SJanosch Frank 			     cmd->rrc);
266729b40f10SJanosch Frank 		break;
266829b40f10SJanosch Frank 	}
2669e0d2773dSJanosch Frank 	case KVM_PV_PREP_RESET: {
2670e0d2773dSJanosch Frank 		r = -EINVAL;
2671e0d2773dSJanosch Frank 		if (!kvm_s390_pv_is_protected(kvm))
2672e0d2773dSJanosch Frank 			break;
2673e0d2773dSJanosch Frank 
2674e0d2773dSJanosch Frank 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2675e0d2773dSJanosch Frank 				  UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
2676e0d2773dSJanosch Frank 		KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
2677e0d2773dSJanosch Frank 			     cmd->rc, cmd->rrc);
2678e0d2773dSJanosch Frank 		break;
2679e0d2773dSJanosch Frank 	}
2680e0d2773dSJanosch Frank 	case KVM_PV_UNSHARE_ALL: {
2681e0d2773dSJanosch Frank 		r = -EINVAL;
2682e0d2773dSJanosch Frank 		if (!kvm_s390_pv_is_protected(kvm))
2683e0d2773dSJanosch Frank 			break;
2684e0d2773dSJanosch Frank 
2685e0d2773dSJanosch Frank 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2686e0d2773dSJanosch Frank 				  UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
2687e0d2773dSJanosch Frank 		KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
2688e0d2773dSJanosch Frank 			     cmd->rc, cmd->rrc);
2689e0d2773dSJanosch Frank 		break;
2690e0d2773dSJanosch Frank 	}
269135d02493SJanosch Frank 	case KVM_PV_INFO: {
269235d02493SJanosch Frank 		struct kvm_s390_pv_info info = {};
269335d02493SJanosch Frank 		ssize_t data_len;
269435d02493SJanosch Frank 
269535d02493SJanosch Frank 		/*
269635d02493SJanosch Frank 		 * No need to check the VM protection here.
269735d02493SJanosch Frank 		 *
269835d02493SJanosch Frank 		 * Maybe user space wants to query some of the data
269935d02493SJanosch Frank 		 * when the VM is still unprotected. If we see the
270035d02493SJanosch Frank 		 * need to fence a new data command we can still
270135d02493SJanosch Frank 		 * return an error in the info handler.
270235d02493SJanosch Frank 		 */
270335d02493SJanosch Frank 
270435d02493SJanosch Frank 		r = -EFAULT;
270535d02493SJanosch Frank 		if (copy_from_user(&info, argp, sizeof(info.header)))
270635d02493SJanosch Frank 			break;
270735d02493SJanosch Frank 
270835d02493SJanosch Frank 		r = -EINVAL;
270935d02493SJanosch Frank 		if (info.header.len_max < sizeof(info.header))
271035d02493SJanosch Frank 			break;
271135d02493SJanosch Frank 
271235d02493SJanosch Frank 		data_len = kvm_s390_handle_pv_info(&info);
271335d02493SJanosch Frank 		if (data_len < 0) {
271435d02493SJanosch Frank 			r = data_len;
271535d02493SJanosch Frank 			break;
271635d02493SJanosch Frank 		}
271735d02493SJanosch Frank 		/*
271835d02493SJanosch Frank 		 * If a data command struct is extended (multiple
271935d02493SJanosch Frank 		 * times) this can be used to determine how much of it
272035d02493SJanosch Frank 		 * is valid.
272135d02493SJanosch Frank 		 */
272235d02493SJanosch Frank 		info.header.len_written = data_len;
272335d02493SJanosch Frank 
272435d02493SJanosch Frank 		r = -EFAULT;
272535d02493SJanosch Frank 		if (copy_to_user(argp, &info, data_len))
272635d02493SJanosch Frank 			break;
272735d02493SJanosch Frank 
272835d02493SJanosch Frank 		r = 0;
272935d02493SJanosch Frank 		break;
273035d02493SJanosch Frank 	}
27310460eb35SJanosch Frank 	case KVM_PV_DUMP: {
27320460eb35SJanosch Frank 		struct kvm_s390_pv_dmp dmp;
27330460eb35SJanosch Frank 
27340460eb35SJanosch Frank 		r = -EINVAL;
27350460eb35SJanosch Frank 		if (!kvm_s390_pv_is_protected(kvm))
27360460eb35SJanosch Frank 			break;
27370460eb35SJanosch Frank 
27380460eb35SJanosch Frank 		r = -EFAULT;
27390460eb35SJanosch Frank 		if (copy_from_user(&dmp, argp, sizeof(dmp)))
27400460eb35SJanosch Frank 			break;
27410460eb35SJanosch Frank 
27420460eb35SJanosch Frank 		r = kvm_s390_pv_dmp(kvm, cmd, dmp);
27430460eb35SJanosch Frank 		if (r)
27440460eb35SJanosch Frank 			break;
27450460eb35SJanosch Frank 
27460460eb35SJanosch Frank 		if (copy_to_user(argp, &dmp, sizeof(dmp))) {
27470460eb35SJanosch Frank 			r = -EFAULT;
27480460eb35SJanosch Frank 			break;
27490460eb35SJanosch Frank 		}
27500460eb35SJanosch Frank 
27510460eb35SJanosch Frank 		break;
27520460eb35SJanosch Frank 	}
275329b40f10SJanosch Frank 	default:
275429b40f10SJanosch Frank 		r = -ENOTTY;
275529b40f10SJanosch Frank 	}
2756fb491d55SClaudio Imbrenda 	if (need_lock)
2757fb491d55SClaudio Imbrenda 		mutex_unlock(&kvm->lock);
2758fb491d55SClaudio Imbrenda 
275929b40f10SJanosch Frank 	return r;
276029b40f10SJanosch Frank }
276129b40f10SJanosch Frank 
2762e9e9feebSJanis Schoetterl-Glausch static bool access_key_invalid(u8 access_key)
2763e9e9feebSJanis Schoetterl-Glausch {
2764e9e9feebSJanis Schoetterl-Glausch 	return access_key > 0xf;
2765e9e9feebSJanis Schoetterl-Glausch }
2766e9e9feebSJanis Schoetterl-Glausch 
2767ef11c946SJanis Schoetterl-Glausch static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2768ef11c946SJanis Schoetterl-Glausch {
2769ef11c946SJanis Schoetterl-Glausch 	void __user *uaddr = (void __user *)mop->buf;
2770ef11c946SJanis Schoetterl-Glausch 	u64 supported_flags;
2771ef11c946SJanis Schoetterl-Glausch 	void *tmpbuf = NULL;
2772ef11c946SJanis Schoetterl-Glausch 	int r, srcu_idx;
2773ef11c946SJanis Schoetterl-Glausch 
2774ef11c946SJanis Schoetterl-Glausch 	supported_flags = KVM_S390_MEMOP_F_SKEY_PROTECTION
2775ef11c946SJanis Schoetterl-Glausch 			  | KVM_S390_MEMOP_F_CHECK_ONLY;
27763d9042f8SJanis Schoetterl-Glausch 	if (mop->flags & ~supported_flags || !mop->size)
2777ef11c946SJanis Schoetterl-Glausch 		return -EINVAL;
2778ef11c946SJanis Schoetterl-Glausch 	if (mop->size > MEM_OP_MAX_SIZE)
2779ef11c946SJanis Schoetterl-Glausch 		return -E2BIG;
2780b5d12744SJanis Schoetterl-Glausch 	/*
2781b5d12744SJanis Schoetterl-Glausch 	 * This is technically a heuristic only, if the kvm->lock is not
2782b5d12744SJanis Schoetterl-Glausch 	 * taken, it is not guaranteed that the vm is/remains non-protected.
2783b5d12744SJanis Schoetterl-Glausch 	 * This is ok from a kernel perspective, wrongdoing is detected
2784b5d12744SJanis Schoetterl-Glausch 	 * on the access, -EFAULT is returned and the vm may crash the
2785b5d12744SJanis Schoetterl-Glausch 	 * next time it accesses the memory in question.
2786b5d12744SJanis Schoetterl-Glausch 	 * There is no sane usecase to do switching and a memop on two
2787b5d12744SJanis Schoetterl-Glausch 	 * different CPUs at the same time.
2788b5d12744SJanis Schoetterl-Glausch 	 */
2789b5d12744SJanis Schoetterl-Glausch 	if (kvm_s390_pv_get_handle(kvm))
2790ef11c946SJanis Schoetterl-Glausch 		return -EINVAL;
2791ef11c946SJanis Schoetterl-Glausch 	if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) {
2792ef11c946SJanis Schoetterl-Glausch 		if (access_key_invalid(mop->key))
2793ef11c946SJanis Schoetterl-Glausch 			return -EINVAL;
2794ef11c946SJanis Schoetterl-Glausch 	} else {
2795ef11c946SJanis Schoetterl-Glausch 		mop->key = 0;
2796ef11c946SJanis Schoetterl-Glausch 	}
2797ef11c946SJanis Schoetterl-Glausch 	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2798ef11c946SJanis Schoetterl-Glausch 		tmpbuf = vmalloc(mop->size);
2799ef11c946SJanis Schoetterl-Glausch 		if (!tmpbuf)
2800ef11c946SJanis Schoetterl-Glausch 			return -ENOMEM;
2801ef11c946SJanis Schoetterl-Glausch 	}
2802ef11c946SJanis Schoetterl-Glausch 
2803ef11c946SJanis Schoetterl-Glausch 	srcu_idx = srcu_read_lock(&kvm->srcu);
2804ef11c946SJanis Schoetterl-Glausch 
2805ef11c946SJanis Schoetterl-Glausch 	if (kvm_is_error_gpa(kvm, mop->gaddr)) {
2806ef11c946SJanis Schoetterl-Glausch 		r = PGM_ADDRESSING;
2807ef11c946SJanis Schoetterl-Glausch 		goto out_unlock;
2808ef11c946SJanis Schoetterl-Glausch 	}
2809ef11c946SJanis Schoetterl-Glausch 
2810ef11c946SJanis Schoetterl-Glausch 	switch (mop->op) {
2811ef11c946SJanis Schoetterl-Glausch 	case KVM_S390_MEMOP_ABSOLUTE_READ: {
2812ef11c946SJanis Schoetterl-Glausch 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2813ef11c946SJanis Schoetterl-Glausch 			r = check_gpa_range(kvm, mop->gaddr, mop->size, GACC_FETCH, mop->key);
2814ef11c946SJanis Schoetterl-Glausch 		} else {
2815ef11c946SJanis Schoetterl-Glausch 			r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
2816ef11c946SJanis Schoetterl-Glausch 						      mop->size, GACC_FETCH, mop->key);
2817ef11c946SJanis Schoetterl-Glausch 			if (r == 0) {
2818ef11c946SJanis Schoetterl-Glausch 				if (copy_to_user(uaddr, tmpbuf, mop->size))
2819ef11c946SJanis Schoetterl-Glausch 					r = -EFAULT;
2820ef11c946SJanis Schoetterl-Glausch 			}
2821ef11c946SJanis Schoetterl-Glausch 		}
2822ef11c946SJanis Schoetterl-Glausch 		break;
2823ef11c946SJanis Schoetterl-Glausch 	}
2824ef11c946SJanis Schoetterl-Glausch 	case KVM_S390_MEMOP_ABSOLUTE_WRITE: {
2825ef11c946SJanis Schoetterl-Glausch 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2826ef11c946SJanis Schoetterl-Glausch 			r = check_gpa_range(kvm, mop->gaddr, mop->size, GACC_STORE, mop->key);
2827ef11c946SJanis Schoetterl-Glausch 		} else {
2828ef11c946SJanis Schoetterl-Glausch 			if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2829ef11c946SJanis Schoetterl-Glausch 				r = -EFAULT;
2830ef11c946SJanis Schoetterl-Glausch 				break;
2831ef11c946SJanis Schoetterl-Glausch 			}
2832ef11c946SJanis Schoetterl-Glausch 			r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
2833ef11c946SJanis Schoetterl-Glausch 						      mop->size, GACC_STORE, mop->key);
2834ef11c946SJanis Schoetterl-Glausch 		}
2835ef11c946SJanis Schoetterl-Glausch 		break;
2836ef11c946SJanis Schoetterl-Glausch 	}
2837ef11c946SJanis Schoetterl-Glausch 	default:
2838ef11c946SJanis Schoetterl-Glausch 		r = -EINVAL;
2839ef11c946SJanis Schoetterl-Glausch 	}
2840ef11c946SJanis Schoetterl-Glausch 
2841ef11c946SJanis Schoetterl-Glausch out_unlock:
2842ef11c946SJanis Schoetterl-Glausch 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2843ef11c946SJanis Schoetterl-Glausch 
2844ef11c946SJanis Schoetterl-Glausch 	vfree(tmpbuf);
2845ef11c946SJanis Schoetterl-Glausch 	return r;
2846ef11c946SJanis Schoetterl-Glausch }
2847ef11c946SJanis Schoetterl-Glausch 
2848b0c632dbSHeiko Carstens long kvm_arch_vm_ioctl(struct file *filp,
2849b0c632dbSHeiko Carstens 		       unsigned int ioctl, unsigned long arg)
2850b0c632dbSHeiko Carstens {
2851b0c632dbSHeiko Carstens 	struct kvm *kvm = filp->private_data;
2852b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
2853f2061656SDominik Dingel 	struct kvm_device_attr attr;
2854b0c632dbSHeiko Carstens 	int r;
2855b0c632dbSHeiko Carstens 
2856b0c632dbSHeiko Carstens 	switch (ioctl) {
2857ba5c1e9bSCarsten Otte 	case KVM_S390_INTERRUPT: {
2858ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
2859ba5c1e9bSCarsten Otte 
2860ba5c1e9bSCarsten Otte 		r = -EFAULT;
2861ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
2862ba5c1e9bSCarsten Otte 			break;
2863ba5c1e9bSCarsten Otte 		r = kvm_s390_inject_vm(kvm, &s390int);
2864ba5c1e9bSCarsten Otte 		break;
2865ba5c1e9bSCarsten Otte 	}
286684223598SCornelia Huck 	case KVM_CREATE_IRQCHIP: {
286784223598SCornelia Huck 		struct kvm_irq_routing_entry routing;
286884223598SCornelia Huck 
286984223598SCornelia Huck 		r = -EINVAL;
287084223598SCornelia Huck 		if (kvm->arch.use_irqchip) {
287184223598SCornelia Huck 			/* Set up dummy routing. */
287284223598SCornelia Huck 			memset(&routing, 0, sizeof(routing));
2873152b2839SNicholas Krause 			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
287484223598SCornelia Huck 		}
287584223598SCornelia Huck 		break;
287684223598SCornelia Huck 	}
2877f2061656SDominik Dingel 	case KVM_SET_DEVICE_ATTR: {
2878f2061656SDominik Dingel 		r = -EFAULT;
2879f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2880f2061656SDominik Dingel 			break;
2881f2061656SDominik Dingel 		r = kvm_s390_vm_set_attr(kvm, &attr);
2882f2061656SDominik Dingel 		break;
2883f2061656SDominik Dingel 	}
2884f2061656SDominik Dingel 	case KVM_GET_DEVICE_ATTR: {
2885f2061656SDominik Dingel 		r = -EFAULT;
2886f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2887f2061656SDominik Dingel 			break;
2888f2061656SDominik Dingel 		r = kvm_s390_vm_get_attr(kvm, &attr);
2889f2061656SDominik Dingel 		break;
2890f2061656SDominik Dingel 	}
2891f2061656SDominik Dingel 	case KVM_HAS_DEVICE_ATTR: {
2892f2061656SDominik Dingel 		r = -EFAULT;
2893f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2894f2061656SDominik Dingel 			break;
2895f2061656SDominik Dingel 		r = kvm_s390_vm_has_attr(kvm, &attr);
2896f2061656SDominik Dingel 		break;
2897f2061656SDominik Dingel 	}
289830ee2a98SJason J. Herne 	case KVM_S390_GET_SKEYS: {
289930ee2a98SJason J. Herne 		struct kvm_s390_skeys args;
290030ee2a98SJason J. Herne 
290130ee2a98SJason J. Herne 		r = -EFAULT;
290230ee2a98SJason J. Herne 		if (copy_from_user(&args, argp,
290330ee2a98SJason J. Herne 				   sizeof(struct kvm_s390_skeys)))
290430ee2a98SJason J. Herne 			break;
290530ee2a98SJason J. Herne 		r = kvm_s390_get_skeys(kvm, &args);
290630ee2a98SJason J. Herne 		break;
290730ee2a98SJason J. Herne 	}
290830ee2a98SJason J. Herne 	case KVM_S390_SET_SKEYS: {
290930ee2a98SJason J. Herne 		struct kvm_s390_skeys args;
291030ee2a98SJason J. Herne 
291130ee2a98SJason J. Herne 		r = -EFAULT;
291230ee2a98SJason J. Herne 		if (copy_from_user(&args, argp,
291330ee2a98SJason J. Herne 				   sizeof(struct kvm_s390_skeys)))
291430ee2a98SJason J. Herne 			break;
291530ee2a98SJason J. Herne 		r = kvm_s390_set_skeys(kvm, &args);
291630ee2a98SJason J. Herne 		break;
291730ee2a98SJason J. Herne 	}
29184036e387SClaudio Imbrenda 	case KVM_S390_GET_CMMA_BITS: {
29194036e387SClaudio Imbrenda 		struct kvm_s390_cmma_log args;
29204036e387SClaudio Imbrenda 
29214036e387SClaudio Imbrenda 		r = -EFAULT;
29224036e387SClaudio Imbrenda 		if (copy_from_user(&args, argp, sizeof(args)))
29234036e387SClaudio Imbrenda 			break;
29241de1ea7eSChristian Borntraeger 		mutex_lock(&kvm->slots_lock);
29254036e387SClaudio Imbrenda 		r = kvm_s390_get_cmma_bits(kvm, &args);
29261de1ea7eSChristian Borntraeger 		mutex_unlock(&kvm->slots_lock);
29274036e387SClaudio Imbrenda 		if (!r) {
29284036e387SClaudio Imbrenda 			r = copy_to_user(argp, &args, sizeof(args));
29294036e387SClaudio Imbrenda 			if (r)
29304036e387SClaudio Imbrenda 				r = -EFAULT;
29314036e387SClaudio Imbrenda 		}
29324036e387SClaudio Imbrenda 		break;
29334036e387SClaudio Imbrenda 	}
29344036e387SClaudio Imbrenda 	case KVM_S390_SET_CMMA_BITS: {
29354036e387SClaudio Imbrenda 		struct kvm_s390_cmma_log args;
29364036e387SClaudio Imbrenda 
29374036e387SClaudio Imbrenda 		r = -EFAULT;
29384036e387SClaudio Imbrenda 		if (copy_from_user(&args, argp, sizeof(args)))
29394036e387SClaudio Imbrenda 			break;
29401de1ea7eSChristian Borntraeger 		mutex_lock(&kvm->slots_lock);
29414036e387SClaudio Imbrenda 		r = kvm_s390_set_cmma_bits(kvm, &args);
29421de1ea7eSChristian Borntraeger 		mutex_unlock(&kvm->slots_lock);
29434036e387SClaudio Imbrenda 		break;
29444036e387SClaudio Imbrenda 	}
294529b40f10SJanosch Frank 	case KVM_S390_PV_COMMAND: {
294629b40f10SJanosch Frank 		struct kvm_pv_cmd args;
294729b40f10SJanosch Frank 
294867cf68b6SEric Farman 		/* protvirt means user cpu state */
294967cf68b6SEric Farman 		kvm_s390_set_user_cpu_state_ctrl(kvm);
295029b40f10SJanosch Frank 		r = 0;
295129b40f10SJanosch Frank 		if (!is_prot_virt_host()) {
295229b40f10SJanosch Frank 			r = -EINVAL;
295329b40f10SJanosch Frank 			break;
295429b40f10SJanosch Frank 		}
295529b40f10SJanosch Frank 		if (copy_from_user(&args, argp, sizeof(args))) {
295629b40f10SJanosch Frank 			r = -EFAULT;
295729b40f10SJanosch Frank 			break;
295829b40f10SJanosch Frank 		}
295929b40f10SJanosch Frank 		if (args.flags) {
296029b40f10SJanosch Frank 			r = -EINVAL;
296129b40f10SJanosch Frank 			break;
296229b40f10SJanosch Frank 		}
2963fb491d55SClaudio Imbrenda 		/* must be called without kvm->lock */
296429b40f10SJanosch Frank 		r = kvm_s390_handle_pv(kvm, &args);
296529b40f10SJanosch Frank 		if (copy_to_user(argp, &args, sizeof(args))) {
296629b40f10SJanosch Frank 			r = -EFAULT;
296729b40f10SJanosch Frank 			break;
296829b40f10SJanosch Frank 		}
296929b40f10SJanosch Frank 		break;
297029b40f10SJanosch Frank 	}
2971ef11c946SJanis Schoetterl-Glausch 	case KVM_S390_MEM_OP: {
2972ef11c946SJanis Schoetterl-Glausch 		struct kvm_s390_mem_op mem_op;
2973ef11c946SJanis Schoetterl-Glausch 
2974ef11c946SJanis Schoetterl-Glausch 		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
2975ef11c946SJanis Schoetterl-Glausch 			r = kvm_s390_vm_mem_op(kvm, &mem_op);
2976ef11c946SJanis Schoetterl-Glausch 		else
2977ef11c946SJanis Schoetterl-Glausch 			r = -EFAULT;
2978ef11c946SJanis Schoetterl-Glausch 		break;
2979ef11c946SJanis Schoetterl-Glausch 	}
2980db1c875eSMatthew Rosato 	case KVM_S390_ZPCI_OP: {
2981db1c875eSMatthew Rosato 		struct kvm_s390_zpci_op args;
2982db1c875eSMatthew Rosato 
2983db1c875eSMatthew Rosato 		r = -EINVAL;
2984db1c875eSMatthew Rosato 		if (!IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
2985db1c875eSMatthew Rosato 			break;
2986db1c875eSMatthew Rosato 		if (copy_from_user(&args, argp, sizeof(args))) {
2987db1c875eSMatthew Rosato 			r = -EFAULT;
2988db1c875eSMatthew Rosato 			break;
2989db1c875eSMatthew Rosato 		}
2990db1c875eSMatthew Rosato 		r = kvm_s390_pci_zpci_op(kvm, &args);
2991db1c875eSMatthew Rosato 		break;
2992db1c875eSMatthew Rosato 	}
2993b0c632dbSHeiko Carstens 	default:
2994367e1319SAvi Kivity 		r = -ENOTTY;
2995b0c632dbSHeiko Carstens 	}
2996b0c632dbSHeiko Carstens 
2997b0c632dbSHeiko Carstens 	return r;
2998b0c632dbSHeiko Carstens }
2999b0c632dbSHeiko Carstens 
300045c9b47cSTony Krowiak static int kvm_s390_apxa_installed(void)
300145c9b47cSTony Krowiak {
3002e585b24aSTony Krowiak 	struct ap_config_info info;
300345c9b47cSTony Krowiak 
3004e585b24aSTony Krowiak 	if (ap_instructions_available()) {
3005e585b24aSTony Krowiak 		if (ap_qci(&info) == 0)
3006e585b24aSTony Krowiak 			return info.apxa;
300745c9b47cSTony Krowiak 	}
300845c9b47cSTony Krowiak 
300945c9b47cSTony Krowiak 	return 0;
301045c9b47cSTony Krowiak }
301145c9b47cSTony Krowiak 
3012e585b24aSTony Krowiak /*
3013e585b24aSTony Krowiak  * The format of the crypto control block (CRYCB) is specified in the 3 low
3014e585b24aSTony Krowiak  * order bits of the CRYCB designation (CRYCBD) field as follows:
3015e585b24aSTony Krowiak  * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
3016e585b24aSTony Krowiak  *	     AP extended addressing (APXA) facility are installed.
3017e585b24aSTony Krowiak  * Format 1: The APXA facility is not installed but the MSAX3 facility is.
3018e585b24aSTony Krowiak  * Format 2: Both the APXA and MSAX3 facilities are installed
3019e585b24aSTony Krowiak  */
302045c9b47cSTony Krowiak static void kvm_s390_set_crycb_format(struct kvm *kvm)
302145c9b47cSTony Krowiak {
302245c9b47cSTony Krowiak 	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
302345c9b47cSTony Krowiak 
3024e585b24aSTony Krowiak 	/* Clear the CRYCB format bits - i.e., set format 0 by default */
3025e585b24aSTony Krowiak 	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
3026e585b24aSTony Krowiak 
3027e585b24aSTony Krowiak 	/* Check whether MSAX3 is installed */
3028e585b24aSTony Krowiak 	if (!test_kvm_facility(kvm, 76))
3029e585b24aSTony Krowiak 		return;
3030e585b24aSTony Krowiak 
303145c9b47cSTony Krowiak 	if (kvm_s390_apxa_installed())
303245c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
303345c9b47cSTony Krowiak 	else
303445c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
303545c9b47cSTony Krowiak }
303645c9b47cSTony Krowiak 
303786956e70STony Krowiak /*
303886956e70STony Krowiak  * kvm_arch_crypto_set_masks
303986956e70STony Krowiak  *
304086956e70STony Krowiak  * @kvm: pointer to the target guest's KVM struct containing the crypto masks
304186956e70STony Krowiak  *	 to be set.
304286956e70STony Krowiak  * @apm: the mask identifying the accessible AP adapters
304386956e70STony Krowiak  * @aqm: the mask identifying the accessible AP domains
304486956e70STony Krowiak  * @adm: the mask identifying the accessible AP control domains
304586956e70STony Krowiak  *
304686956e70STony Krowiak  * Set the masks that identify the adapters, domains and control domains to
304786956e70STony Krowiak  * which the KVM guest is granted access.
304886956e70STony Krowiak  *
304986956e70STony Krowiak  * Note: The kvm->lock mutex must be locked by the caller before invoking this
305086956e70STony Krowiak  *	 function.
305186956e70STony Krowiak  */
30520e237e44SPierre Morel void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
30530e237e44SPierre Morel 			       unsigned long *aqm, unsigned long *adm)
30540e237e44SPierre Morel {
30550e237e44SPierre Morel 	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
30560e237e44SPierre Morel 
30570e237e44SPierre Morel 	kvm_s390_vcpu_block_all(kvm);
30580e237e44SPierre Morel 
30590e237e44SPierre Morel 	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
30600e237e44SPierre Morel 	case CRYCB_FORMAT2: /* APCB1 use 256 bits */
30610e237e44SPierre Morel 		memcpy(crycb->apcb1.apm, apm, 32);
30620e237e44SPierre Morel 		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
30630e237e44SPierre Morel 			 apm[0], apm[1], apm[2], apm[3]);
30640e237e44SPierre Morel 		memcpy(crycb->apcb1.aqm, aqm, 32);
30650e237e44SPierre Morel 		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
30660e237e44SPierre Morel 			 aqm[0], aqm[1], aqm[2], aqm[3]);
30670e237e44SPierre Morel 		memcpy(crycb->apcb1.adm, adm, 32);
30680e237e44SPierre Morel 		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
30690e237e44SPierre Morel 			 adm[0], adm[1], adm[2], adm[3]);
30700e237e44SPierre Morel 		break;
30710e237e44SPierre Morel 	case CRYCB_FORMAT1:
30720e237e44SPierre Morel 	case CRYCB_FORMAT0: /* Fall through both use APCB0 */
30730e237e44SPierre Morel 		memcpy(crycb->apcb0.apm, apm, 8);
30740e237e44SPierre Morel 		memcpy(crycb->apcb0.aqm, aqm, 2);
30750e237e44SPierre Morel 		memcpy(crycb->apcb0.adm, adm, 2);
30760e237e44SPierre Morel 		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
30770e237e44SPierre Morel 			 apm[0], *((unsigned short *)aqm),
30780e237e44SPierre Morel 			 *((unsigned short *)adm));
30790e237e44SPierre Morel 		break;
30800e237e44SPierre Morel 	default:	/* Can not happen */
30810e237e44SPierre Morel 		break;
30820e237e44SPierre Morel 	}
30830e237e44SPierre Morel 
30840e237e44SPierre Morel 	/* recreate the shadow crycb for each vcpu */
30850e237e44SPierre Morel 	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
30860e237e44SPierre Morel 	kvm_s390_vcpu_unblock_all(kvm);
30870e237e44SPierre Morel }
30880e237e44SPierre Morel EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
30890e237e44SPierre Morel 
309086956e70STony Krowiak /*
309186956e70STony Krowiak  * kvm_arch_crypto_clear_masks
309286956e70STony Krowiak  *
309386956e70STony Krowiak  * @kvm: pointer to the target guest's KVM struct containing the crypto masks
309486956e70STony Krowiak  *	 to be cleared.
309586956e70STony Krowiak  *
309686956e70STony Krowiak  * Clear the masks that identify the adapters, domains and control domains to
309786956e70STony Krowiak  * which the KVM guest is granted access.
309886956e70STony Krowiak  *
309986956e70STony Krowiak  * Note: The kvm->lock mutex must be locked by the caller before invoking this
310086956e70STony Krowiak  *	 function.
310186956e70STony Krowiak  */
310242104598STony Krowiak void kvm_arch_crypto_clear_masks(struct kvm *kvm)
310342104598STony Krowiak {
310442104598STony Krowiak 	kvm_s390_vcpu_block_all(kvm);
310542104598STony Krowiak 
310642104598STony Krowiak 	memset(&kvm->arch.crypto.crycb->apcb0, 0,
310742104598STony Krowiak 	       sizeof(kvm->arch.crypto.crycb->apcb0));
310842104598STony Krowiak 	memset(&kvm->arch.crypto.crycb->apcb1, 0,
310942104598STony Krowiak 	       sizeof(kvm->arch.crypto.crycb->apcb1));
311042104598STony Krowiak 
31110e237e44SPierre Morel 	VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
31126cc571b1SPierre Morel 	/* recreate the shadow crycb for each vcpu */
31136cc571b1SPierre Morel 	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
311442104598STony Krowiak 	kvm_s390_vcpu_unblock_all(kvm);
311542104598STony Krowiak }
311642104598STony Krowiak EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
311742104598STony Krowiak 
31189bb0ec09SDavid Hildenbrand static u64 kvm_s390_get_initial_cpuid(void)
31199d8d5786SMichael Mueller {
31209bb0ec09SDavid Hildenbrand 	struct cpuid cpuid;
31219bb0ec09SDavid Hildenbrand 
31229bb0ec09SDavid Hildenbrand 	get_cpu_id(&cpuid);
31239bb0ec09SDavid Hildenbrand 	cpuid.version = 0xff;
31249bb0ec09SDavid Hildenbrand 	return *((u64 *) &cpuid);
31259d8d5786SMichael Mueller }
31269d8d5786SMichael Mueller 
3127c54f0d6aSDavid Hildenbrand static void kvm_s390_crypto_init(struct kvm *kvm)
31285102ee87STony Krowiak {
3129c54f0d6aSDavid Hildenbrand 	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
313045c9b47cSTony Krowiak 	kvm_s390_set_crycb_format(kvm);
31311e753732STony Krowiak 	init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem);
31325102ee87STony Krowiak 
3133e585b24aSTony Krowiak 	if (!test_kvm_facility(kvm, 76))
3134e585b24aSTony Krowiak 		return;
3135e585b24aSTony Krowiak 
3136ed6f76b4STony Krowiak 	/* Enable AES/DEA protected key functions by default */
3137ed6f76b4STony Krowiak 	kvm->arch.crypto.aes_kw = 1;
3138ed6f76b4STony Krowiak 	kvm->arch.crypto.dea_kw = 1;
3139ed6f76b4STony Krowiak 	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
3140ed6f76b4STony Krowiak 			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
3141ed6f76b4STony Krowiak 	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
3142ed6f76b4STony Krowiak 			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
31435102ee87STony Krowiak }
31445102ee87STony Krowiak 
31457d43bafcSEugene (jno) Dvurechenski static void sca_dispose(struct kvm *kvm)
31467d43bafcSEugene (jno) Dvurechenski {
31477d43bafcSEugene (jno) Dvurechenski 	if (kvm->arch.use_esca)
31485e044315SEugene (jno) Dvurechenski 		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
31497d43bafcSEugene (jno) Dvurechenski 	else
31507d43bafcSEugene (jno) Dvurechenski 		free_page((unsigned long)(kvm->arch.sca));
31517d43bafcSEugene (jno) Dvurechenski 	kvm->arch.sca = NULL;
31527d43bafcSEugene (jno) Dvurechenski }
31537d43bafcSEugene (jno) Dvurechenski 
315409340b2fSMatthew Rosato void kvm_arch_free_vm(struct kvm *kvm)
315509340b2fSMatthew Rosato {
315609340b2fSMatthew Rosato 	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
315709340b2fSMatthew Rosato 		kvm_s390_pci_clear_list(kvm);
315809340b2fSMatthew Rosato 
315909340b2fSMatthew Rosato 	__kvm_arch_free_vm(kvm);
316009340b2fSMatthew Rosato }
316109340b2fSMatthew Rosato 
3162e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
3163b0c632dbSHeiko Carstens {
3164c4196218SChristian Borntraeger 	gfp_t alloc_flags = GFP_KERNEL_ACCOUNT;
31659d8d5786SMichael Mueller 	int i, rc;
3166b0c632dbSHeiko Carstens 	char debug_name[16];
3167f6c137ffSChristian Borntraeger 	static unsigned long sca_offset;
3168b0c632dbSHeiko Carstens 
3169e08b9637SCarsten Otte 	rc = -EINVAL;
3170e08b9637SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
3171e08b9637SCarsten Otte 	if (type & ~KVM_VM_S390_UCONTROL)
3172e08b9637SCarsten Otte 		goto out_err;
3173e08b9637SCarsten Otte 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
3174e08b9637SCarsten Otte 		goto out_err;
3175e08b9637SCarsten Otte #else
3176e08b9637SCarsten Otte 	if (type)
3177e08b9637SCarsten Otte 		goto out_err;
3178e08b9637SCarsten Otte #endif
3179e08b9637SCarsten Otte 
3180b0c632dbSHeiko Carstens 	rc = s390_enable_sie();
3181b0c632dbSHeiko Carstens 	if (rc)
3182d89f5effSJan Kiszka 		goto out_err;
3183b0c632dbSHeiko Carstens 
3184b290411aSCarsten Otte 	rc = -ENOMEM;
3185b290411aSCarsten Otte 
318676a6dd72SDavid Hildenbrand 	if (!sclp.has_64bscao)
318776a6dd72SDavid Hildenbrand 		alloc_flags |= GFP_DMA;
31885e044315SEugene (jno) Dvurechenski 	rwlock_init(&kvm->arch.sca_lock);
31899ac96d75SDavid Hildenbrand 	/* start with basic SCA */
319076a6dd72SDavid Hildenbrand 	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
3191b0c632dbSHeiko Carstens 	if (!kvm->arch.sca)
3192d89f5effSJan Kiszka 		goto out_err;
31930d9ce162SJunaid Shahid 	mutex_lock(&kvm_lock);
3194c5c2c393SDavid Hildenbrand 	sca_offset += 16;
3195bc784cceSEugene (jno) Dvurechenski 	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
3196c5c2c393SDavid Hildenbrand 		sca_offset = 0;
3197bc784cceSEugene (jno) Dvurechenski 	kvm->arch.sca = (struct bsca_block *)
3198bc784cceSEugene (jno) Dvurechenski 			((char *) kvm->arch.sca + sca_offset);
31990d9ce162SJunaid Shahid 	mutex_unlock(&kvm_lock);
3200b0c632dbSHeiko Carstens 
3201b0c632dbSHeiko Carstens 	sprintf(debug_name, "kvm-%u", current->pid);
3202b0c632dbSHeiko Carstens 
32031cb9cf72SChristian Borntraeger 	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
3204b0c632dbSHeiko Carstens 	if (!kvm->arch.dbf)
320540f5b735SDominik Dingel 		goto out_err;
3206b0c632dbSHeiko Carstens 
320719114bebSMichael Mueller 	BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
3208c54f0d6aSDavid Hildenbrand 	kvm->arch.sie_page2 =
3209c4196218SChristian Borntraeger 	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
3210c54f0d6aSDavid Hildenbrand 	if (!kvm->arch.sie_page2)
321140f5b735SDominik Dingel 		goto out_err;
32129d8d5786SMichael Mueller 
321325c84dbaSMichael Mueller 	kvm->arch.sie_page2->kvm = kvm;
3214c54f0d6aSDavid Hildenbrand 	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
3215c3b9e3e1SChristian Borntraeger 
3216c3b9e3e1SChristian Borntraeger 	for (i = 0; i < kvm_s390_fac_size(); i++) {
321717e89e13SSven Schnelle 		kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
3218c3b9e3e1SChristian Borntraeger 					      (kvm_s390_fac_base[i] |
3219c3b9e3e1SChristian Borntraeger 					       kvm_s390_fac_ext[i]);
322017e89e13SSven Schnelle 		kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
3221c3b9e3e1SChristian Borntraeger 					      kvm_s390_fac_base[i];
3222c3b9e3e1SChristian Borntraeger 	}
3223346fa2f8SChristian Borntraeger 	kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
3224981467c9SMichael Mueller 
32251935222dSDavid Hildenbrand 	/* we are always in czam mode - even on pre z14 machines */
32261935222dSDavid Hildenbrand 	set_kvm_facility(kvm->arch.model.fac_mask, 138);
32271935222dSDavid Hildenbrand 	set_kvm_facility(kvm->arch.model.fac_list, 138);
32281935222dSDavid Hildenbrand 	/* we emulate STHYI in kvm */
322995ca2cb5SJanosch Frank 	set_kvm_facility(kvm->arch.model.fac_mask, 74);
323095ca2cb5SJanosch Frank 	set_kvm_facility(kvm->arch.model.fac_list, 74);
32311bab1c02SClaudio Imbrenda 	if (MACHINE_HAS_TLB_GUEST) {
32321bab1c02SClaudio Imbrenda 		set_kvm_facility(kvm->arch.model.fac_mask, 147);
32331bab1c02SClaudio Imbrenda 		set_kvm_facility(kvm->arch.model.fac_list, 147);
32341bab1c02SClaudio Imbrenda 	}
323595ca2cb5SJanosch Frank 
323605f31e3bSPierre Morel 	if (css_general_characteristics.aiv && test_facility(65))
323705f31e3bSPierre Morel 		set_kvm_facility(kvm->arch.model.fac_mask, 65);
323805f31e3bSPierre Morel 
32399bb0ec09SDavid Hildenbrand 	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
324037c5f6c8SDavid Hildenbrand 	kvm->arch.model.ibc = sclp.ibc & 0x0fff;
32419d8d5786SMichael Mueller 
3242c54f0d6aSDavid Hildenbrand 	kvm_s390_crypto_init(kvm);
32435102ee87STony Krowiak 
324409340b2fSMatthew Rosato 	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
324509340b2fSMatthew Rosato 		mutex_lock(&kvm->lock);
324609340b2fSMatthew Rosato 		kvm_s390_pci_init_list(kvm);
324709340b2fSMatthew Rosato 		kvm_s390_vcpu_pci_enable_interp(kvm);
324809340b2fSMatthew Rosato 		mutex_unlock(&kvm->lock);
324909340b2fSMatthew Rosato 	}
325009340b2fSMatthew Rosato 
325151978393SFei Li 	mutex_init(&kvm->arch.float_int.ais_lock);
3252ba5c1e9bSCarsten Otte 	spin_lock_init(&kvm->arch.float_int.lock);
32536d3da241SJens Freimann 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
32546d3da241SJens Freimann 		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
32558a242234SHeiko Carstens 	init_waitqueue_head(&kvm->arch.ipte_wq);
3256a6b7e459SThomas Huth 	mutex_init(&kvm->arch.ipte_mutex);
3257ba5c1e9bSCarsten Otte 
3258b0c632dbSHeiko Carstens 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
325978f26131SChristian Borntraeger 	VM_EVENT(kvm, 3, "vm created with type %lu", type);
3260b0c632dbSHeiko Carstens 
3261e08b9637SCarsten Otte 	if (type & KVM_VM_S390_UCONTROL) {
3262e08b9637SCarsten Otte 		kvm->arch.gmap = NULL;
3263a3a92c31SDominik Dingel 		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
3264e08b9637SCarsten Otte 	} else {
326532e6b236SGuenther Hutzl 		if (sclp.hamax == U64_MAX)
3266ee71d16dSMartin Schwidefsky 			kvm->arch.mem_limit = TASK_SIZE_MAX;
326732e6b236SGuenther Hutzl 		else
3268ee71d16dSMartin Schwidefsky 			kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
326932e6b236SGuenther Hutzl 						    sclp.hamax + 1);
32706ea427bbSMartin Schwidefsky 		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
3271598841caSCarsten Otte 		if (!kvm->arch.gmap)
327240f5b735SDominik Dingel 			goto out_err;
32732c70fe44SChristian Borntraeger 		kvm->arch.gmap->private = kvm;
327424eb3a82SDominik Dingel 		kvm->arch.gmap->pfault_enabled = 0;
3275e08b9637SCarsten Otte 	}
3276fa6b7fe9SCornelia Huck 
3277c9f0a2b8SJanosch Frank 	kvm->arch.use_pfmfi = sclp.has_pfmfi;
327855531b74SJanosch Frank 	kvm->arch.use_skf = sclp.has_skey;
32798ad35755SDavid Hildenbrand 	spin_lock_init(&kvm->arch.start_stop_lock);
3280a3508fbeSDavid Hildenbrand 	kvm_s390_vsie_init(kvm);
3281cc674ef2SMichael Mueller 	if (use_gisa)
3282d7c5cb01SMichael Mueller 		kvm_s390_gisa_init(kvm);
3283fb491d55SClaudio Imbrenda 	INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup);
3284fb491d55SClaudio Imbrenda 	kvm->arch.pv.set_aside = NULL;
32858335713aSChristian Borntraeger 	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
32868ad35755SDavid Hildenbrand 
3287d89f5effSJan Kiszka 	return 0;
3288d89f5effSJan Kiszka out_err:
3289c54f0d6aSDavid Hildenbrand 	free_page((unsigned long)kvm->arch.sie_page2);
329040f5b735SDominik Dingel 	debug_unregister(kvm->arch.dbf);
32917d43bafcSEugene (jno) Dvurechenski 	sca_dispose(kvm);
329278f26131SChristian Borntraeger 	KVM_EVENT(3, "creation of vm failed: %d", rc);
3293d89f5effSJan Kiszka 	return rc;
3294b0c632dbSHeiko Carstens }
3295b0c632dbSHeiko Carstens 
3296d329c035SChristian Borntraeger void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
3297d329c035SChristian Borntraeger {
329829b40f10SJanosch Frank 	u16 rc, rrc;
329929b40f10SJanosch Frank 
3300d329c035SChristian Borntraeger 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
3301ade38c31SCornelia Huck 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
330267335e63SChristian Borntraeger 	kvm_s390_clear_local_irqs(vcpu);
33033c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
3304bc784cceSEugene (jno) Dvurechenski 	if (!kvm_is_ucontrol(vcpu->kvm))
3305a6e2f683SEugene (jno) Dvurechenski 		sca_del_vcpu(vcpu);
330624fe0195SPierre Morel 	kvm_s390_update_topology_change_report(vcpu->kvm, 1);
330727e0393fSCarsten Otte 
330827e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm))
33096ea427bbSMartin Schwidefsky 		gmap_remove(vcpu->arch.gmap);
331027e0393fSCarsten Otte 
3311e6db1d61SDominik Dingel 	if (vcpu->kvm->arch.use_cmma)
3312b31605c1SDominik Dingel 		kvm_s390_vcpu_unsetup_cmma(vcpu);
331329b40f10SJanosch Frank 	/* We can not hold the vcpu mutex here, we are already dying */
331429b40f10SJanosch Frank 	if (kvm_s390_pv_cpu_get_handle(vcpu))
331529b40f10SJanosch Frank 		kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
3316d329c035SChristian Borntraeger 	free_page((unsigned long)(vcpu->arch.sie_block));
3317d329c035SChristian Borntraeger }
3318d329c035SChristian Borntraeger 
3319b0c632dbSHeiko Carstens void kvm_arch_destroy_vm(struct kvm *kvm)
3320b0c632dbSHeiko Carstens {
332129b40f10SJanosch Frank 	u16 rc, rrc;
332229b40f10SJanosch Frank 
332327592ae8SMarc Zyngier 	kvm_destroy_vcpus(kvm);
33247d43bafcSEugene (jno) Dvurechenski 	sca_dispose(kvm);
3325d7c5cb01SMichael Mueller 	kvm_s390_gisa_destroy(kvm);
332629b40f10SJanosch Frank 	/*
332729b40f10SJanosch Frank 	 * We are already at the end of life and kvm->lock is not taken.
332829b40f10SJanosch Frank 	 * This is ok as the file descriptor is closed by now and nobody
3329fb491d55SClaudio Imbrenda 	 * can mess with the pv state.
333029b40f10SJanosch Frank 	 */
3331fb491d55SClaudio Imbrenda 	kvm_s390_pv_deinit_cleanup_all(kvm, &rc, &rrc);
3332ca2fd060SClaudio Imbrenda 	/*
3333ca2fd060SClaudio Imbrenda 	 * Remove the mmu notifier only when the whole KVM VM is torn down,
3334ca2fd060SClaudio Imbrenda 	 * and only if one was registered to begin with. If the VM is
3335ca2fd060SClaudio Imbrenda 	 * currently not protected, but has been previously been protected,
3336ca2fd060SClaudio Imbrenda 	 * then it's possible that the notifier is still registered.
3337ca2fd060SClaudio Imbrenda 	 */
3338ca2fd060SClaudio Imbrenda 	if (kvm->arch.pv.mmu_notifier.ops)
3339ca2fd060SClaudio Imbrenda 		mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm);
3340ca2fd060SClaudio Imbrenda 
334129b40f10SJanosch Frank 	debug_unregister(kvm->arch.dbf);
3342c54f0d6aSDavid Hildenbrand 	free_page((unsigned long)kvm->arch.sie_page2);
334327e0393fSCarsten Otte 	if (!kvm_is_ucontrol(kvm))
33446ea427bbSMartin Schwidefsky 		gmap_remove(kvm->arch.gmap);
3345841b91c5SCornelia Huck 	kvm_s390_destroy_adapters(kvm);
334667335e63SChristian Borntraeger 	kvm_s390_clear_float_irqs(kvm);
3347a3508fbeSDavid Hildenbrand 	kvm_s390_vsie_destroy(kvm);
33488335713aSChristian Borntraeger 	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
3349b0c632dbSHeiko Carstens }
3350b0c632dbSHeiko Carstens 
3351b0c632dbSHeiko Carstens /* Section: vcpu related */
3352dafd032aSDominik Dingel static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
3353b0c632dbSHeiko Carstens {
33546ea427bbSMartin Schwidefsky 	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
335527e0393fSCarsten Otte 	if (!vcpu->arch.gmap)
335627e0393fSCarsten Otte 		return -ENOMEM;
33572c70fe44SChristian Borntraeger 	vcpu->arch.gmap->private = vcpu->kvm;
3358dafd032aSDominik Dingel 
335927e0393fSCarsten Otte 	return 0;
336027e0393fSCarsten Otte }
336127e0393fSCarsten Otte 
3362a6e2f683SEugene (jno) Dvurechenski static void sca_del_vcpu(struct kvm_vcpu *vcpu)
3363a6e2f683SEugene (jno) Dvurechenski {
3364a6940674SDavid Hildenbrand 	if (!kvm_s390_use_sca_entries())
3365a6940674SDavid Hildenbrand 		return;
33665e044315SEugene (jno) Dvurechenski 	read_lock(&vcpu->kvm->arch.sca_lock);
33677d43bafcSEugene (jno) Dvurechenski 	if (vcpu->kvm->arch.use_esca) {
33687d43bafcSEugene (jno) Dvurechenski 		struct esca_block *sca = vcpu->kvm->arch.sca;
33697d43bafcSEugene (jno) Dvurechenski 
33707d43bafcSEugene (jno) Dvurechenski 		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
33717d43bafcSEugene (jno) Dvurechenski 		sca->cpu[vcpu->vcpu_id].sda = 0;
33727d43bafcSEugene (jno) Dvurechenski 	} else {
3373bc784cceSEugene (jno) Dvurechenski 		struct bsca_block *sca = vcpu->kvm->arch.sca;
3374a6e2f683SEugene (jno) Dvurechenski 
3375a6e2f683SEugene (jno) Dvurechenski 		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
3376a6e2f683SEugene (jno) Dvurechenski 		sca->cpu[vcpu->vcpu_id].sda = 0;
3377a6e2f683SEugene (jno) Dvurechenski 	}
33785e044315SEugene (jno) Dvurechenski 	read_unlock(&vcpu->kvm->arch.sca_lock);
33797d43bafcSEugene (jno) Dvurechenski }
3380a6e2f683SEugene (jno) Dvurechenski 
3381eaa78f34SDavid Hildenbrand static void sca_add_vcpu(struct kvm_vcpu *vcpu)
3382a6e2f683SEugene (jno) Dvurechenski {
3383a6940674SDavid Hildenbrand 	if (!kvm_s390_use_sca_entries()) {
3384fe0ef003SNico Boehr 		phys_addr_t sca_phys = virt_to_phys(vcpu->kvm->arch.sca);
3385a6940674SDavid Hildenbrand 
3386a6940674SDavid Hildenbrand 		/* we still need the basic sca for the ipte control */
3387fe0ef003SNico Boehr 		vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3388fe0ef003SNico Boehr 		vcpu->arch.sie_block->scaol = sca_phys;
3389f07afa04SDavid Hildenbrand 		return;
3390a6940674SDavid Hildenbrand 	}
3391eaa78f34SDavid Hildenbrand 	read_lock(&vcpu->kvm->arch.sca_lock);
3392eaa78f34SDavid Hildenbrand 	if (vcpu->kvm->arch.use_esca) {
3393eaa78f34SDavid Hildenbrand 		struct esca_block *sca = vcpu->kvm->arch.sca;
3394fe0ef003SNico Boehr 		phys_addr_t sca_phys = virt_to_phys(sca);
33957d43bafcSEugene (jno) Dvurechenski 
3396fe0ef003SNico Boehr 		sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
3397fe0ef003SNico Boehr 		vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3398fe0ef003SNico Boehr 		vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK;
33990c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
3400eaa78f34SDavid Hildenbrand 		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
34017d43bafcSEugene (jno) Dvurechenski 	} else {
3402eaa78f34SDavid Hildenbrand 		struct bsca_block *sca = vcpu->kvm->arch.sca;
3403fe0ef003SNico Boehr 		phys_addr_t sca_phys = virt_to_phys(sca);
3404a6e2f683SEugene (jno) Dvurechenski 
3405fe0ef003SNico Boehr 		sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
3406fe0ef003SNico Boehr 		vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3407fe0ef003SNico Boehr 		vcpu->arch.sie_block->scaol = sca_phys;
3408eaa78f34SDavid Hildenbrand 		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
3409a6e2f683SEugene (jno) Dvurechenski 	}
3410eaa78f34SDavid Hildenbrand 	read_unlock(&vcpu->kvm->arch.sca_lock);
34115e044315SEugene (jno) Dvurechenski }
34125e044315SEugene (jno) Dvurechenski 
34135e044315SEugene (jno) Dvurechenski /* Basic SCA to Extended SCA data copy routines */
34145e044315SEugene (jno) Dvurechenski static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
34155e044315SEugene (jno) Dvurechenski {
34165e044315SEugene (jno) Dvurechenski 	d->sda = s->sda;
34175e044315SEugene (jno) Dvurechenski 	d->sigp_ctrl.c = s->sigp_ctrl.c;
34185e044315SEugene (jno) Dvurechenski 	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
34195e044315SEugene (jno) Dvurechenski }
34205e044315SEugene (jno) Dvurechenski 
34215e044315SEugene (jno) Dvurechenski static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
34225e044315SEugene (jno) Dvurechenski {
34235e044315SEugene (jno) Dvurechenski 	int i;
34245e044315SEugene (jno) Dvurechenski 
34255e044315SEugene (jno) Dvurechenski 	d->ipte_control = s->ipte_control;
34265e044315SEugene (jno) Dvurechenski 	d->mcn[0] = s->mcn;
34275e044315SEugene (jno) Dvurechenski 	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
34285e044315SEugene (jno) Dvurechenski 		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
34295e044315SEugene (jno) Dvurechenski }
34305e044315SEugene (jno) Dvurechenski 
34315e044315SEugene (jno) Dvurechenski static int sca_switch_to_extended(struct kvm *kvm)
34325e044315SEugene (jno) Dvurechenski {
34335e044315SEugene (jno) Dvurechenski 	struct bsca_block *old_sca = kvm->arch.sca;
34345e044315SEugene (jno) Dvurechenski 	struct esca_block *new_sca;
34355e044315SEugene (jno) Dvurechenski 	struct kvm_vcpu *vcpu;
343646808a4cSMarc Zyngier 	unsigned long vcpu_idx;
34375e044315SEugene (jno) Dvurechenski 	u32 scaol, scaoh;
3438fe0ef003SNico Boehr 	phys_addr_t new_sca_phys;
34395e044315SEugene (jno) Dvurechenski 
344029b40f10SJanosch Frank 	if (kvm->arch.use_esca)
344129b40f10SJanosch Frank 		return 0;
344229b40f10SJanosch Frank 
3443c4196218SChristian Borntraeger 	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL_ACCOUNT | __GFP_ZERO);
34445e044315SEugene (jno) Dvurechenski 	if (!new_sca)
34455e044315SEugene (jno) Dvurechenski 		return -ENOMEM;
34465e044315SEugene (jno) Dvurechenski 
3447fe0ef003SNico Boehr 	new_sca_phys = virt_to_phys(new_sca);
3448fe0ef003SNico Boehr 	scaoh = new_sca_phys >> 32;
3449fe0ef003SNico Boehr 	scaol = new_sca_phys & ESCA_SCAOL_MASK;
34505e044315SEugene (jno) Dvurechenski 
34515e044315SEugene (jno) Dvurechenski 	kvm_s390_vcpu_block_all(kvm);
34525e044315SEugene (jno) Dvurechenski 	write_lock(&kvm->arch.sca_lock);
34535e044315SEugene (jno) Dvurechenski 
34545e044315SEugene (jno) Dvurechenski 	sca_copy_b_to_e(new_sca, old_sca);
34555e044315SEugene (jno) Dvurechenski 
34565e044315SEugene (jno) Dvurechenski 	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
34575e044315SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaoh = scaoh;
34585e044315SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaol = scaol;
34590c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
34605e044315SEugene (jno) Dvurechenski 	}
34615e044315SEugene (jno) Dvurechenski 	kvm->arch.sca = new_sca;
34625e044315SEugene (jno) Dvurechenski 	kvm->arch.use_esca = 1;
34635e044315SEugene (jno) Dvurechenski 
34645e044315SEugene (jno) Dvurechenski 	write_unlock(&kvm->arch.sca_lock);
34655e044315SEugene (jno) Dvurechenski 	kvm_s390_vcpu_unblock_all(kvm);
34665e044315SEugene (jno) Dvurechenski 
34675e044315SEugene (jno) Dvurechenski 	free_page((unsigned long)old_sca);
34685e044315SEugene (jno) Dvurechenski 
34698335713aSChristian Borntraeger 	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
34708335713aSChristian Borntraeger 		 old_sca, kvm->arch.sca);
34715e044315SEugene (jno) Dvurechenski 	return 0;
34727d43bafcSEugene (jno) Dvurechenski }
3473a6e2f683SEugene (jno) Dvurechenski 
3474a6e2f683SEugene (jno) Dvurechenski static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
3475a6e2f683SEugene (jno) Dvurechenski {
34765e044315SEugene (jno) Dvurechenski 	int rc;
34775e044315SEugene (jno) Dvurechenski 
3478a6940674SDavid Hildenbrand 	if (!kvm_s390_use_sca_entries()) {
3479a6940674SDavid Hildenbrand 		if (id < KVM_MAX_VCPUS)
3480a6940674SDavid Hildenbrand 			return true;
3481a6940674SDavid Hildenbrand 		return false;
3482a6940674SDavid Hildenbrand 	}
34835e044315SEugene (jno) Dvurechenski 	if (id < KVM_S390_BSCA_CPU_SLOTS)
34845e044315SEugene (jno) Dvurechenski 		return true;
348576a6dd72SDavid Hildenbrand 	if (!sclp.has_esca || !sclp.has_64bscao)
34865e044315SEugene (jno) Dvurechenski 		return false;
34875e044315SEugene (jno) Dvurechenski 
34885e044315SEugene (jno) Dvurechenski 	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
34895e044315SEugene (jno) Dvurechenski 
34905e044315SEugene (jno) Dvurechenski 	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
3491a6e2f683SEugene (jno) Dvurechenski }
3492a6e2f683SEugene (jno) Dvurechenski 
3493db0758b2SDavid Hildenbrand /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3494db0758b2SDavid Hildenbrand static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3495db0758b2SDavid Hildenbrand {
3496db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
34979c23a131SDavid Hildenbrand 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3498db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_start = get_tod_clock_fast();
34999c23a131SDavid Hildenbrand 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3500db0758b2SDavid Hildenbrand }
3501db0758b2SDavid Hildenbrand 
3502db0758b2SDavid Hildenbrand /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3503db0758b2SDavid Hildenbrand static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3504db0758b2SDavid Hildenbrand {
3505db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
35069c23a131SDavid Hildenbrand 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3507db0758b2SDavid Hildenbrand 	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3508db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_start = 0;
35099c23a131SDavid Hildenbrand 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3510db0758b2SDavid Hildenbrand }
3511db0758b2SDavid Hildenbrand 
3512db0758b2SDavid Hildenbrand /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3513db0758b2SDavid Hildenbrand static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3514db0758b2SDavid Hildenbrand {
3515db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
3516db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_enabled = true;
3517db0758b2SDavid Hildenbrand 	__start_cpu_timer_accounting(vcpu);
3518db0758b2SDavid Hildenbrand }
3519db0758b2SDavid Hildenbrand 
3520db0758b2SDavid Hildenbrand /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3521db0758b2SDavid Hildenbrand static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3522db0758b2SDavid Hildenbrand {
3523db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
3524db0758b2SDavid Hildenbrand 	__stop_cpu_timer_accounting(vcpu);
3525db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_enabled = false;
3526db0758b2SDavid Hildenbrand }
3527db0758b2SDavid Hildenbrand 
3528db0758b2SDavid Hildenbrand static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3529db0758b2SDavid Hildenbrand {
3530db0758b2SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3531db0758b2SDavid Hildenbrand 	__enable_cpu_timer_accounting(vcpu);
3532db0758b2SDavid Hildenbrand 	preempt_enable();
3533db0758b2SDavid Hildenbrand }
3534db0758b2SDavid Hildenbrand 
3535db0758b2SDavid Hildenbrand static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3536db0758b2SDavid Hildenbrand {
3537db0758b2SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3538db0758b2SDavid Hildenbrand 	__disable_cpu_timer_accounting(vcpu);
3539db0758b2SDavid Hildenbrand 	preempt_enable();
3540db0758b2SDavid Hildenbrand }
3541db0758b2SDavid Hildenbrand 
35424287f247SDavid Hildenbrand /* set the cpu timer - may only be called from the VCPU thread itself */
35434287f247SDavid Hildenbrand void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
35444287f247SDavid Hildenbrand {
3545db0758b2SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
35469c23a131SDavid Hildenbrand 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3547db0758b2SDavid Hildenbrand 	if (vcpu->arch.cputm_enabled)
3548db0758b2SDavid Hildenbrand 		vcpu->arch.cputm_start = get_tod_clock_fast();
35494287f247SDavid Hildenbrand 	vcpu->arch.sie_block->cputm = cputm;
35509c23a131SDavid Hildenbrand 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3551db0758b2SDavid Hildenbrand 	preempt_enable();
35524287f247SDavid Hildenbrand }
35534287f247SDavid Hildenbrand 
3554db0758b2SDavid Hildenbrand /* update and get the cpu timer - can also be called from other VCPU threads */
35554287f247SDavid Hildenbrand __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
35564287f247SDavid Hildenbrand {
35579c23a131SDavid Hildenbrand 	unsigned int seq;
3558db0758b2SDavid Hildenbrand 	__u64 value;
3559db0758b2SDavid Hildenbrand 
3560db0758b2SDavid Hildenbrand 	if (unlikely(!vcpu->arch.cputm_enabled))
35614287f247SDavid Hildenbrand 		return vcpu->arch.sie_block->cputm;
3562db0758b2SDavid Hildenbrand 
35639c23a131SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
35649c23a131SDavid Hildenbrand 	do {
35659c23a131SDavid Hildenbrand 		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
35669c23a131SDavid Hildenbrand 		/*
35679c23a131SDavid Hildenbrand 		 * If the writer would ever execute a read in the critical
35689c23a131SDavid Hildenbrand 		 * section, e.g. in irq context, we have a deadlock.
35699c23a131SDavid Hildenbrand 		 */
35709c23a131SDavid Hildenbrand 		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
3571db0758b2SDavid Hildenbrand 		value = vcpu->arch.sie_block->cputm;
35729c23a131SDavid Hildenbrand 		/* if cputm_start is 0, accounting is being started/stopped */
35739c23a131SDavid Hildenbrand 		if (likely(vcpu->arch.cputm_start))
3574db0758b2SDavid Hildenbrand 			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
35759c23a131SDavid Hildenbrand 	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
35769c23a131SDavid Hildenbrand 	preempt_enable();
3577db0758b2SDavid Hildenbrand 	return value;
35784287f247SDavid Hildenbrand }
35794287f247SDavid Hildenbrand 
3580b0c632dbSHeiko Carstens void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3581b0c632dbSHeiko Carstens {
35829977e886SHendrik Brueckner 
358337d9df98SDavid Hildenbrand 	gmap_enable(vcpu->arch.enabled_gmap);
3584ef8f4f49SDavid Hildenbrand 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
35855ebda316SDavid Hildenbrand 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3586db0758b2SDavid Hildenbrand 		__start_cpu_timer_accounting(vcpu);
358701a745acSDavid Hildenbrand 	vcpu->cpu = cpu;
3588b0c632dbSHeiko Carstens }
3589b0c632dbSHeiko Carstens 
3590b0c632dbSHeiko Carstens void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
3591b0c632dbSHeiko Carstens {
359201a745acSDavid Hildenbrand 	vcpu->cpu = -1;
35935ebda316SDavid Hildenbrand 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3594db0758b2SDavid Hildenbrand 		__stop_cpu_timer_accounting(vcpu);
35959daecfc6SDavid Hildenbrand 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
359637d9df98SDavid Hildenbrand 	vcpu->arch.enabled_gmap = gmap_get_enabled();
359737d9df98SDavid Hildenbrand 	gmap_disable(vcpu->arch.enabled_gmap);
35989977e886SHendrik Brueckner 
3599b0c632dbSHeiko Carstens }
3600b0c632dbSHeiko Carstens 
360131928aa5SDominik Dingel void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
360242897d86SMarcelo Tosatti {
360372f25020SJason J. Herne 	mutex_lock(&vcpu->kvm->lock);
3604fdf03650SFan Zhang 	preempt_disable();
360572f25020SJason J. Herne 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
3606d16b52cbSDavid Hildenbrand 	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
3607fdf03650SFan Zhang 	preempt_enable();
360872f25020SJason J. Herne 	mutex_unlock(&vcpu->kvm->lock);
360925508824SDavid Hildenbrand 	if (!kvm_is_ucontrol(vcpu->kvm)) {
3610dafd032aSDominik Dingel 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
3611eaa78f34SDavid Hildenbrand 		sca_add_vcpu(vcpu);
361225508824SDavid Hildenbrand 	}
36136502a34cSDavid Hildenbrand 	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
36146502a34cSDavid Hildenbrand 		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
361537d9df98SDavid Hildenbrand 	/* make vcpu_load load the right gmap on the first trigger */
361637d9df98SDavid Hildenbrand 	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
361742897d86SMarcelo Tosatti }
361842897d86SMarcelo Tosatti 
36198ec2fa52SChristian Borntraeger static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
36208ec2fa52SChristian Borntraeger {
36218ec2fa52SChristian Borntraeger 	if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
36228ec2fa52SChristian Borntraeger 	    test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
36238ec2fa52SChristian Borntraeger 		return true;
36248ec2fa52SChristian Borntraeger 	return false;
36258ec2fa52SChristian Borntraeger }
36268ec2fa52SChristian Borntraeger 
36278ec2fa52SChristian Borntraeger static bool kvm_has_pckmo_ecc(struct kvm *kvm)
36288ec2fa52SChristian Borntraeger {
36298ec2fa52SChristian Borntraeger 	/* At least one ECC subfunction must be present */
36308ec2fa52SChristian Borntraeger 	return kvm_has_pckmo_subfunc(kvm, 32) ||
36318ec2fa52SChristian Borntraeger 	       kvm_has_pckmo_subfunc(kvm, 33) ||
36328ec2fa52SChristian Borntraeger 	       kvm_has_pckmo_subfunc(kvm, 34) ||
36338ec2fa52SChristian Borntraeger 	       kvm_has_pckmo_subfunc(kvm, 40) ||
36348ec2fa52SChristian Borntraeger 	       kvm_has_pckmo_subfunc(kvm, 41);
36358ec2fa52SChristian Borntraeger 
36368ec2fa52SChristian Borntraeger }
36378ec2fa52SChristian Borntraeger 
36385102ee87STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
36395102ee87STony Krowiak {
3640e585b24aSTony Krowiak 	/*
3641e585b24aSTony Krowiak 	 * If the AP instructions are not being interpreted and the MSAX3
3642e585b24aSTony Krowiak 	 * facility is not configured for the guest, there is nothing to set up.
3643e585b24aSTony Krowiak 	 */
3644e585b24aSTony Krowiak 	if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
36455102ee87STony Krowiak 		return;
36465102ee87STony Krowiak 
3647e585b24aSTony Krowiak 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
3648a374e892STony Krowiak 	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
364937940fb0STony Krowiak 	vcpu->arch.sie_block->eca &= ~ECA_APIE;
36508ec2fa52SChristian Borntraeger 	vcpu->arch.sie_block->ecd &= ~ECD_ECC;
3651a374e892STony Krowiak 
3652e585b24aSTony Krowiak 	if (vcpu->kvm->arch.crypto.apie)
3653e585b24aSTony Krowiak 		vcpu->arch.sie_block->eca |= ECA_APIE;
3654e585b24aSTony Krowiak 
3655e585b24aSTony Krowiak 	/* Set up protected key support */
36568ec2fa52SChristian Borntraeger 	if (vcpu->kvm->arch.crypto.aes_kw) {
3657a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
36588ec2fa52SChristian Borntraeger 		/* ecc is also wrapped with AES key */
36598ec2fa52SChristian Borntraeger 		if (kvm_has_pckmo_ecc(vcpu->kvm))
36608ec2fa52SChristian Borntraeger 			vcpu->arch.sie_block->ecd |= ECD_ECC;
36618ec2fa52SChristian Borntraeger 	}
36628ec2fa52SChristian Borntraeger 
3663a374e892STony Krowiak 	if (vcpu->kvm->arch.crypto.dea_kw)
3664a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
36655102ee87STony Krowiak }
36665102ee87STony Krowiak 
3667b31605c1SDominik Dingel void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
3668b31605c1SDominik Dingel {
3669fe0ef003SNico Boehr 	free_page((unsigned long)phys_to_virt(vcpu->arch.sie_block->cbrlo));
3670b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = 0;
3671b31605c1SDominik Dingel }
3672b31605c1SDominik Dingel 
3673b31605c1SDominik Dingel int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
3674b31605c1SDominik Dingel {
3675fe0ef003SNico Boehr 	void *cbrlo_page = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3676fe0ef003SNico Boehr 
3677fe0ef003SNico Boehr 	if (!cbrlo_page)
3678b31605c1SDominik Dingel 		return -ENOMEM;
3679fe0ef003SNico Boehr 
3680fe0ef003SNico Boehr 	vcpu->arch.sie_block->cbrlo = virt_to_phys(cbrlo_page);
3681b31605c1SDominik Dingel 	return 0;
3682b31605c1SDominik Dingel }
3683b31605c1SDominik Dingel 
368491520f1aSMichael Mueller static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
368591520f1aSMichael Mueller {
368691520f1aSMichael Mueller 	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
368791520f1aSMichael Mueller 
368891520f1aSMichael Mueller 	vcpu->arch.sie_block->ibc = model->ibc;
368980bc79dcSDavid Hildenbrand 	if (test_kvm_facility(vcpu->kvm, 7))
3690fe0ef003SNico Boehr 		vcpu->arch.sie_block->fac = virt_to_phys(model->fac_list);
369191520f1aSMichael Mueller }
369291520f1aSMichael Mueller 
3693ff72bb55SSean Christopherson static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
3694ff72bb55SSean Christopherson {
3695b31605c1SDominik Dingel 	int rc = 0;
369629b40f10SJanosch Frank 	u16 uvrc, uvrrc;
3697b31288faSKonstantin Weitz 
36989e6dabefSCornelia Huck 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
36999e6dabefSCornelia Huck 						    CPUSTAT_SM |
3700a4a4f191SGuenther Hutzl 						    CPUSTAT_STOPPED);
3701a4a4f191SGuenther Hutzl 
370253df84f8SGuenther Hutzl 	if (test_kvm_facility(vcpu->kvm, 78))
3703ef8f4f49SDavid Hildenbrand 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
370453df84f8SGuenther Hutzl 	else if (test_kvm_facility(vcpu->kvm, 8))
3705ef8f4f49SDavid Hildenbrand 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
3706a4a4f191SGuenther Hutzl 
370791520f1aSMichael Mueller 	kvm_s390_vcpu_setup_model(vcpu);
370891520f1aSMichael Mueller 
3709bdab09f3SDavid Hildenbrand 	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
3710bdab09f3SDavid Hildenbrand 	if (MACHINE_HAS_ESOP)
37110c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
3712bd50e8ecSDavid Hildenbrand 	if (test_kvm_facility(vcpu->kvm, 9))
37130c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecb |= ECB_SRSI;
371424fe0195SPierre Morel 	if (test_kvm_facility(vcpu->kvm, 11))
371524fe0195SPierre Morel 		vcpu->arch.sie_block->ecb |= ECB_PTF;
3716f597d24eSDavid Hildenbrand 	if (test_kvm_facility(vcpu->kvm, 73))
37170c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecb |= ECB_TE;
37187119decfSJanis Schoetterl-Glausch 	if (!kvm_is_ucontrol(vcpu->kvm))
37197119decfSJanis Schoetterl-Glausch 		vcpu->arch.sie_block->ecb |= ECB_SPECI;
37207feb6bb8SMichael Mueller 
3721c9f0a2b8SJanosch Frank 	if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
37220c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
3723cd1836f5SJanosch Frank 	if (test_kvm_facility(vcpu->kvm, 130))
37240c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
37250c9d8683SDavid Hildenbrand 	vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
372648ee7d3aSDavid Hildenbrand 	if (sclp.has_cei)
37270c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= ECA_CEI;
372811ad65b7SDavid Hildenbrand 	if (sclp.has_ib)
37290c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= ECA_IB;
373037c5f6c8SDavid Hildenbrand 	if (sclp.has_siif)
37310c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= ECA_SII;
373237c5f6c8SDavid Hildenbrand 	if (sclp.has_sigpif)
37330c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= ECA_SIGPI;
373418280d8bSMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 129)) {
37350c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= ECA_VX;
37360c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
373713211ea7SEric Farman 	}
37388fa1696eSCollin L. Walling 	if (test_kvm_facility(vcpu->kvm, 139))
37398fa1696eSCollin L. Walling 		vcpu->arch.sie_block->ecd |= ECD_MEF;
3740a3da7b4aSChristian Borntraeger 	if (test_kvm_facility(vcpu->kvm, 156))
3741a3da7b4aSChristian Borntraeger 		vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
3742d7c5cb01SMichael Mueller 	if (vcpu->arch.sie_block->gd) {
3743d7c5cb01SMichael Mueller 		vcpu->arch.sie_block->eca |= ECA_AIV;
3744d7c5cb01SMichael Mueller 		VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3745d7c5cb01SMichael Mueller 			   vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3746d7c5cb01SMichael Mueller 	}
3747fe0ef003SNico Boehr 	vcpu->arch.sie_block->sdnxo = virt_to_phys(&vcpu->run->s.regs.sdnx) | SDNXC;
3748fe0ef003SNico Boehr 	vcpu->arch.sie_block->riccbd = virt_to_phys(&vcpu->run->s.regs.riccb);
3749730cd632SFarhan Ali 
3750730cd632SFarhan Ali 	if (sclp.has_kss)
3751ef8f4f49SDavid Hildenbrand 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
3752730cd632SFarhan Ali 	else
3753492d8642SThomas Huth 		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
37545a5e6536SMatthew Rosato 
3755e6db1d61SDominik Dingel 	if (vcpu->kvm->arch.use_cmma) {
3756b31605c1SDominik Dingel 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
3757b31605c1SDominik Dingel 		if (rc)
3758b31605c1SDominik Dingel 			return rc;
3759b31288faSKonstantin Weitz 	}
37600ac96cafSDavid Hildenbrand 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3761ca872302SChristian Borntraeger 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
37629d8d5786SMichael Mueller 
376367d49d52SCollin Walling 	vcpu->arch.sie_block->hpid = HPID_KVM;
376467d49d52SCollin Walling 
37655102ee87STony Krowiak 	kvm_s390_vcpu_crypto_setup(vcpu);
37665102ee87STony Krowiak 
37673f4bbb43SMatthew Rosato 	kvm_s390_vcpu_pci_setup(vcpu);
37683f4bbb43SMatthew Rosato 
376929b40f10SJanosch Frank 	mutex_lock(&vcpu->kvm->lock);
377029b40f10SJanosch Frank 	if (kvm_s390_pv_is_protected(vcpu->kvm)) {
377129b40f10SJanosch Frank 		rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
377229b40f10SJanosch Frank 		if (rc)
377329b40f10SJanosch Frank 			kvm_s390_vcpu_unsetup_cmma(vcpu);
377429b40f10SJanosch Frank 	}
377529b40f10SJanosch Frank 	mutex_unlock(&vcpu->kvm->lock);
377629b40f10SJanosch Frank 
3777b31605c1SDominik Dingel 	return rc;
3778b0c632dbSHeiko Carstens }
3779b0c632dbSHeiko Carstens 
3780897cc38eSSean Christopherson int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
3781897cc38eSSean Christopherson {
3782897cc38eSSean Christopherson 	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
3783897cc38eSSean Christopherson 		return -EINVAL;
3784897cc38eSSean Christopherson 	return 0;
3785897cc38eSSean Christopherson }
3786897cc38eSSean Christopherson 
3787e529ef66SSean Christopherson int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
3788b0c632dbSHeiko Carstens {
37897feb6bb8SMichael Mueller 	struct sie_page *sie_page;
3790897cc38eSSean Christopherson 	int rc;
37914d47555aSCarsten Otte 
3792da72ca4dSQingFeng Hao 	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
3793c4196218SChristian Borntraeger 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT);
37947feb6bb8SMichael Mueller 	if (!sie_page)
3795e529ef66SSean Christopherson 		return -ENOMEM;
3796b0c632dbSHeiko Carstens 
37977feb6bb8SMichael Mueller 	vcpu->arch.sie_block = &sie_page->sie_block;
3798fe0ef003SNico Boehr 	vcpu->arch.sie_block->itdba = virt_to_phys(&sie_page->itdb);
37997feb6bb8SMichael Mueller 
3800efed1104SDavid Hildenbrand 	/* the real guest size will always be smaller than msl */
3801efed1104SDavid Hildenbrand 	vcpu->arch.sie_block->mso = 0;
3802efed1104SDavid Hildenbrand 	vcpu->arch.sie_block->msl = sclp.hamax;
3803efed1104SDavid Hildenbrand 
3804e529ef66SSean Christopherson 	vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
3805ba5c1e9bSCarsten Otte 	spin_lock_init(&vcpu->arch.local_int.lock);
3806ee6a569dSMichael Mueller 	vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm);
38079c23a131SDavid Hildenbrand 	seqcount_init(&vcpu->arch.cputm_seqcount);
3808ba5c1e9bSCarsten Otte 
3809321f8ee5SSean Christopherson 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3810321f8ee5SSean Christopherson 	kvm_clear_async_pf_completion_queue(vcpu);
3811321f8ee5SSean Christopherson 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
3812321f8ee5SSean Christopherson 				    KVM_SYNC_GPRS |
3813321f8ee5SSean Christopherson 				    KVM_SYNC_ACRS |
3814321f8ee5SSean Christopherson 				    KVM_SYNC_CRS |
3815321f8ee5SSean Christopherson 				    KVM_SYNC_ARCH0 |
381623a60f83SCollin Walling 				    KVM_SYNC_PFAULT |
381723a60f83SCollin Walling 				    KVM_SYNC_DIAG318;
3818321f8ee5SSean Christopherson 	kvm_s390_set_prefix(vcpu, 0);
3819321f8ee5SSean Christopherson 	if (test_kvm_facility(vcpu->kvm, 64))
3820321f8ee5SSean Christopherson 		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
3821321f8ee5SSean Christopherson 	if (test_kvm_facility(vcpu->kvm, 82))
3822321f8ee5SSean Christopherson 		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
3823321f8ee5SSean Christopherson 	if (test_kvm_facility(vcpu->kvm, 133))
3824321f8ee5SSean Christopherson 		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
3825321f8ee5SSean Christopherson 	if (test_kvm_facility(vcpu->kvm, 156))
3826321f8ee5SSean Christopherson 		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
3827321f8ee5SSean Christopherson 	/* fprs can be synchronized via vrs, even if the guest has no vx. With
3828321f8ee5SSean Christopherson 	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
3829321f8ee5SSean Christopherson 	 */
3830321f8ee5SSean Christopherson 	if (MACHINE_HAS_VX)
3831321f8ee5SSean Christopherson 		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
3832321f8ee5SSean Christopherson 	else
3833321f8ee5SSean Christopherson 		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
3834321f8ee5SSean Christopherson 
3835321f8ee5SSean Christopherson 	if (kvm_is_ucontrol(vcpu->kvm)) {
3836321f8ee5SSean Christopherson 		rc = __kvm_ucontrol_vcpu_init(vcpu);
3837321f8ee5SSean Christopherson 		if (rc)
3838a2017f17SSean Christopherson 			goto out_free_sie_block;
3839321f8ee5SSean Christopherson 	}
3840321f8ee5SSean Christopherson 
3841e529ef66SSean Christopherson 	VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
3842e529ef66SSean Christopherson 		 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3843e529ef66SSean Christopherson 	trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3844b0c632dbSHeiko Carstens 
3845ff72bb55SSean Christopherson 	rc = kvm_s390_vcpu_setup(vcpu);
3846ff72bb55SSean Christopherson 	if (rc)
3847ff72bb55SSean Christopherson 		goto out_ucontrol_uninit;
384824fe0195SPierre Morel 
384924fe0195SPierre Morel 	kvm_s390_update_topology_change_report(vcpu->kvm, 1);
3850e529ef66SSean Christopherson 	return 0;
3851e529ef66SSean Christopherson 
3852ff72bb55SSean Christopherson out_ucontrol_uninit:
3853ff72bb55SSean Christopherson 	if (kvm_is_ucontrol(vcpu->kvm))
3854ff72bb55SSean Christopherson 		gmap_remove(vcpu->arch.gmap);
38557b06bf2fSWei Yongjun out_free_sie_block:
38567b06bf2fSWei Yongjun 	free_page((unsigned long)(vcpu->arch.sie_block));
3857e529ef66SSean Christopherson 	return rc;
3858b0c632dbSHeiko Carstens }
3859b0c632dbSHeiko Carstens 
3860b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3861b0c632dbSHeiko Carstens {
38629b57e9d5SHalil Pasic 	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
38639a022067SDavid Hildenbrand 	return kvm_s390_vcpu_has_irq(vcpu, 0);
3864b0c632dbSHeiko Carstens }
3865b0c632dbSHeiko Carstens 
3866199b5763SLongpeng(Mike) bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3867199b5763SLongpeng(Mike) {
38680546c63dSLongpeng(Mike) 	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
3869199b5763SLongpeng(Mike) }
3870199b5763SLongpeng(Mike) 
387127406cd5SChristian Borntraeger void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
387249b99e1eSChristian Borntraeger {
3873805de8f4SPeter Zijlstra 	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
387461a6df54SDavid Hildenbrand 	exit_sie(vcpu);
387549b99e1eSChristian Borntraeger }
387649b99e1eSChristian Borntraeger 
387727406cd5SChristian Borntraeger void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
387849b99e1eSChristian Borntraeger {
3879805de8f4SPeter Zijlstra 	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
388049b99e1eSChristian Borntraeger }
388149b99e1eSChristian Borntraeger 
38828e236546SChristian Borntraeger static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
38838e236546SChristian Borntraeger {
3884805de8f4SPeter Zijlstra 	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
388561a6df54SDavid Hildenbrand 	exit_sie(vcpu);
38868e236546SChristian Borntraeger }
38878e236546SChristian Borntraeger 
38889ea59728SDavid Hildenbrand bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
38899ea59728SDavid Hildenbrand {
38909ea59728SDavid Hildenbrand 	return atomic_read(&vcpu->arch.sie_block->prog20) &
38919ea59728SDavid Hildenbrand 	       (PROG_BLOCK_SIE | PROG_REQUEST);
38929ea59728SDavid Hildenbrand }
38939ea59728SDavid Hildenbrand 
38948e236546SChristian Borntraeger static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
38958e236546SChristian Borntraeger {
38969bf9fde2SJason J. Herne 	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
38978e236546SChristian Borntraeger }
38988e236546SChristian Borntraeger 
389949b99e1eSChristian Borntraeger /*
39009ea59728SDavid Hildenbrand  * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
390149b99e1eSChristian Borntraeger  * If the CPU is not running (e.g. waiting as idle) the function will
390249b99e1eSChristian Borntraeger  * return immediately. */
390349b99e1eSChristian Borntraeger void exit_sie(struct kvm_vcpu *vcpu)
390449b99e1eSChristian Borntraeger {
3905ef8f4f49SDavid Hildenbrand 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
39069ea59728SDavid Hildenbrand 	kvm_s390_vsie_kick(vcpu);
390749b99e1eSChristian Borntraeger 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
390849b99e1eSChristian Borntraeger 		cpu_relax();
390949b99e1eSChristian Borntraeger }
391049b99e1eSChristian Borntraeger 
39118e236546SChristian Borntraeger /* Kick a guest cpu out of SIE to process a request synchronously */
39128e236546SChristian Borntraeger void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
391349b99e1eSChristian Borntraeger {
3914df06dae3SSean Christopherson 	__kvm_make_request(req, vcpu);
39158e236546SChristian Borntraeger 	kvm_s390_vcpu_request(vcpu);
391649b99e1eSChristian Borntraeger }
391749b99e1eSChristian Borntraeger 
3918414d3b07SMartin Schwidefsky static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
3919414d3b07SMartin Schwidefsky 			      unsigned long end)
39202c70fe44SChristian Borntraeger {
39212c70fe44SChristian Borntraeger 	struct kvm *kvm = gmap->private;
39222c70fe44SChristian Borntraeger 	struct kvm_vcpu *vcpu;
3923414d3b07SMartin Schwidefsky 	unsigned long prefix;
392446808a4cSMarc Zyngier 	unsigned long i;
39252c70fe44SChristian Borntraeger 
392665d0b0d4SDavid Hildenbrand 	if (gmap_is_shadow(gmap))
392765d0b0d4SDavid Hildenbrand 		return;
3928414d3b07SMartin Schwidefsky 	if (start >= 1UL << 31)
3929414d3b07SMartin Schwidefsky 		/* We are only interested in prefix pages */
3930414d3b07SMartin Schwidefsky 		return;
39312c70fe44SChristian Borntraeger 	kvm_for_each_vcpu(i, vcpu, kvm) {
39322c70fe44SChristian Borntraeger 		/* match against both prefix pages */
3933414d3b07SMartin Schwidefsky 		prefix = kvm_s390_get_prefix(vcpu);
3934414d3b07SMartin Schwidefsky 		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
3935414d3b07SMartin Schwidefsky 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
3936414d3b07SMartin Schwidefsky 				   start, end);
3937cc65c3a1SSean Christopherson 			kvm_s390_sync_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
39382c70fe44SChristian Borntraeger 		}
39392c70fe44SChristian Borntraeger 	}
39402c70fe44SChristian Borntraeger }
39412c70fe44SChristian Borntraeger 
39428b905d28SChristian Borntraeger bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
39438b905d28SChristian Borntraeger {
39448b905d28SChristian Borntraeger 	/* do not poll with more than halt_poll_max_steal percent of steal time */
39458b905d28SChristian Borntraeger 	if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
39466f390916SSean Christopherson 	    READ_ONCE(halt_poll_max_steal)) {
39478b905d28SChristian Borntraeger 		vcpu->stat.halt_no_poll_steal++;
39488b905d28SChristian Borntraeger 		return true;
39498b905d28SChristian Borntraeger 	}
39508b905d28SChristian Borntraeger 	return false;
39518b905d28SChristian Borntraeger }
39528b905d28SChristian Borntraeger 
3953b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
3954b6d33834SChristoffer Dall {
3955b6d33834SChristoffer Dall 	/* kvm common code refers to this, but never calls it */
3956b6d33834SChristoffer Dall 	BUG();
3957b6d33834SChristoffer Dall 	return 0;
3958b6d33834SChristoffer Dall }
3959b6d33834SChristoffer Dall 
396014eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
396114eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
396214eebd91SCarsten Otte {
396314eebd91SCarsten Otte 	int r = -EINVAL;
396414eebd91SCarsten Otte 
396514eebd91SCarsten Otte 	switch (reg->id) {
396629b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
396729b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->todpr,
396829b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
396929b7c71bSCarsten Otte 		break;
397029b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
397129b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->epoch,
397229b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
397329b7c71bSCarsten Otte 		break;
397446a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
39754287f247SDavid Hildenbrand 		r = put_user(kvm_s390_get_cpu_timer(vcpu),
397646a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
397746a6dd1cSJason J. herne 		break;
397846a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
397946a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->ckc,
398046a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
398146a6dd1cSJason J. herne 		break;
3982536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
3983536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_token,
3984536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
3985536336c2SDominik Dingel 		break;
3986536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
3987536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_compare,
3988536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
3989536336c2SDominik Dingel 		break;
3990536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
3991536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_select,
3992536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
3993536336c2SDominik Dingel 		break;
3994672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
3995672550fbSChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->pp,
3996672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
3997672550fbSChristian Borntraeger 		break;
3998afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
3999afa45ff5SChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->gbea,
4000afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
4001afa45ff5SChristian Borntraeger 		break;
400214eebd91SCarsten Otte 	default:
400314eebd91SCarsten Otte 		break;
400414eebd91SCarsten Otte 	}
400514eebd91SCarsten Otte 
400614eebd91SCarsten Otte 	return r;
400714eebd91SCarsten Otte }
400814eebd91SCarsten Otte 
400914eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
401014eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
401114eebd91SCarsten Otte {
401214eebd91SCarsten Otte 	int r = -EINVAL;
40134287f247SDavid Hildenbrand 	__u64 val;
401414eebd91SCarsten Otte 
401514eebd91SCarsten Otte 	switch (reg->id) {
401629b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
401729b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->todpr,
401829b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
401929b7c71bSCarsten Otte 		break;
402029b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
402129b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->epoch,
402229b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
402329b7c71bSCarsten Otte 		break;
402446a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
40254287f247SDavid Hildenbrand 		r = get_user(val, (u64 __user *)reg->addr);
40264287f247SDavid Hildenbrand 		if (!r)
40274287f247SDavid Hildenbrand 			kvm_s390_set_cpu_timer(vcpu, val);
402846a6dd1cSJason J. herne 		break;
402946a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
403046a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->ckc,
403146a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
403246a6dd1cSJason J. herne 		break;
4033536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
4034536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_token,
4035536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
40369fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
40379fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
4038536336c2SDominik Dingel 		break;
4039536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
4040536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_compare,
4041536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
4042536336c2SDominik Dingel 		break;
4043536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
4044536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_select,
4045536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
4046536336c2SDominik Dingel 		break;
4047672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
4048672550fbSChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->pp,
4049672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
4050672550fbSChristian Borntraeger 		break;
4051afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
4052afa45ff5SChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->gbea,
4053afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
4054afa45ff5SChristian Borntraeger 		break;
405514eebd91SCarsten Otte 	default:
405614eebd91SCarsten Otte 		break;
405714eebd91SCarsten Otte 	}
405814eebd91SCarsten Otte 
405914eebd91SCarsten Otte 	return r;
406014eebd91SCarsten Otte }
4061b6d33834SChristoffer Dall 
40627de3f142SJanosch Frank static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
4063b0c632dbSHeiko Carstens {
40647de3f142SJanosch Frank 	vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
40657de3f142SJanosch Frank 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
40667de3f142SJanosch Frank 	memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
40677de3f142SJanosch Frank 
40687de3f142SJanosch Frank 	kvm_clear_async_pf_completion_queue(vcpu);
40697de3f142SJanosch Frank 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
40707de3f142SJanosch Frank 		kvm_s390_vcpu_stop(vcpu);
40717de3f142SJanosch Frank 	kvm_s390_clear_local_irqs(vcpu);
40727de3f142SJanosch Frank }
40737de3f142SJanosch Frank 
40747de3f142SJanosch Frank static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
40757de3f142SJanosch Frank {
40767de3f142SJanosch Frank 	/* Initial reset is a superset of the normal reset */
40777de3f142SJanosch Frank 	kvm_arch_vcpu_ioctl_normal_reset(vcpu);
40787de3f142SJanosch Frank 
4079e93fc7b4SChristian Borntraeger 	/*
4080e93fc7b4SChristian Borntraeger 	 * This equals initial cpu reset in pop, but we don't switch to ESA.
4081e93fc7b4SChristian Borntraeger 	 * We do not only reset the internal data, but also ...
4082e93fc7b4SChristian Borntraeger 	 */
40837de3f142SJanosch Frank 	vcpu->arch.sie_block->gpsw.mask = 0;
40847de3f142SJanosch Frank 	vcpu->arch.sie_block->gpsw.addr = 0;
40857de3f142SJanosch Frank 	kvm_s390_set_prefix(vcpu, 0);
40867de3f142SJanosch Frank 	kvm_s390_set_cpu_timer(vcpu, 0);
40877de3f142SJanosch Frank 	vcpu->arch.sie_block->ckc = 0;
40887de3f142SJanosch Frank 	memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
40897de3f142SJanosch Frank 	vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
40907de3f142SJanosch Frank 	vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
4091e93fc7b4SChristian Borntraeger 
4092e93fc7b4SChristian Borntraeger 	/* ... the data in sync regs */
4093e93fc7b4SChristian Borntraeger 	memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
4094e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.ckc = 0;
4095e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
4096e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
4097e93fc7b4SChristian Borntraeger 	vcpu->run->psw_addr = 0;
4098e93fc7b4SChristian Borntraeger 	vcpu->run->psw_mask = 0;
4099e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.todpr = 0;
4100e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.cputm = 0;
4101e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.ckc = 0;
4102e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.pp = 0;
4103e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.gbea = 1;
41047de3f142SJanosch Frank 	vcpu->run->s.regs.fpc = 0;
41050f303504SJanosch Frank 	/*
41060f303504SJanosch Frank 	 * Do not reset these registers in the protected case, as some of
41070f303504SJanosch Frank 	 * them are overlayed and they are not accessible in this case
41080f303504SJanosch Frank 	 * anyway.
41090f303504SJanosch Frank 	 */
41100f303504SJanosch Frank 	if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
41117de3f142SJanosch Frank 		vcpu->arch.sie_block->gbea = 1;
41127de3f142SJanosch Frank 		vcpu->arch.sie_block->pp = 0;
41137de3f142SJanosch Frank 		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
41140f303504SJanosch Frank 		vcpu->arch.sie_block->todpr = 0;
41150f303504SJanosch Frank 	}
41167de3f142SJanosch Frank }
41177de3f142SJanosch Frank 
41187de3f142SJanosch Frank static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
41197de3f142SJanosch Frank {
41207de3f142SJanosch Frank 	struct kvm_sync_regs *regs = &vcpu->run->s.regs;
41217de3f142SJanosch Frank 
41227de3f142SJanosch Frank 	/* Clear reset is a superset of the initial reset */
41237de3f142SJanosch Frank 	kvm_arch_vcpu_ioctl_initial_reset(vcpu);
41247de3f142SJanosch Frank 
41257de3f142SJanosch Frank 	memset(&regs->gprs, 0, sizeof(regs->gprs));
41267de3f142SJanosch Frank 	memset(&regs->vrs, 0, sizeof(regs->vrs));
41277de3f142SJanosch Frank 	memset(&regs->acrs, 0, sizeof(regs->acrs));
41287de3f142SJanosch Frank 	memset(&regs->gscb, 0, sizeof(regs->gscb));
41297de3f142SJanosch Frank 
41307de3f142SJanosch Frank 	regs->etoken = 0;
41317de3f142SJanosch Frank 	regs->etoken_extension = 0;
4132b0c632dbSHeiko Carstens }
4133b0c632dbSHeiko Carstens 
4134b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4135b0c632dbSHeiko Carstens {
4136875656feSChristoffer Dall 	vcpu_load(vcpu);
41375a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
4138875656feSChristoffer Dall 	vcpu_put(vcpu);
4139b0c632dbSHeiko Carstens 	return 0;
4140b0c632dbSHeiko Carstens }
4141b0c632dbSHeiko Carstens 
4142b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4143b0c632dbSHeiko Carstens {
41441fc9b76bSChristoffer Dall 	vcpu_load(vcpu);
41455a32c1afSChristian Borntraeger 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
41461fc9b76bSChristoffer Dall 	vcpu_put(vcpu);
4147b0c632dbSHeiko Carstens 	return 0;
4148b0c632dbSHeiko Carstens }
4149b0c632dbSHeiko Carstens 
4150b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
4151b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
4152b0c632dbSHeiko Carstens {
4153b4ef9d4eSChristoffer Dall 	vcpu_load(vcpu);
4154b4ef9d4eSChristoffer Dall 
415559674c1aSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
4156b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
4157b4ef9d4eSChristoffer Dall 
4158b4ef9d4eSChristoffer Dall 	vcpu_put(vcpu);
4159b0c632dbSHeiko Carstens 	return 0;
4160b0c632dbSHeiko Carstens }
4161b0c632dbSHeiko Carstens 
4162b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
4163b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
4164b0c632dbSHeiko Carstens {
4165bcdec41cSChristoffer Dall 	vcpu_load(vcpu);
4166bcdec41cSChristoffer Dall 
416759674c1aSChristian Borntraeger 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
4168b0c632dbSHeiko Carstens 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
4169bcdec41cSChristoffer Dall 
4170bcdec41cSChristoffer Dall 	vcpu_put(vcpu);
4171b0c632dbSHeiko Carstens 	return 0;
4172b0c632dbSHeiko Carstens }
4173b0c632dbSHeiko Carstens 
4174b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4175b0c632dbSHeiko Carstens {
41766a96bc7fSChristoffer Dall 	int ret = 0;
41776a96bc7fSChristoffer Dall 
41786a96bc7fSChristoffer Dall 	vcpu_load(vcpu);
41796a96bc7fSChristoffer Dall 
41806a96bc7fSChristoffer Dall 	if (test_fp_ctl(fpu->fpc)) {
41816a96bc7fSChristoffer Dall 		ret = -EINVAL;
41826a96bc7fSChristoffer Dall 		goto out;
41836a96bc7fSChristoffer Dall 	}
4184e1788bb9SChristian Borntraeger 	vcpu->run->s.regs.fpc = fpu->fpc;
41859abc2a08SDavid Hildenbrand 	if (MACHINE_HAS_VX)
4186a7d4b8f2SDavid Hildenbrand 		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
4187a7d4b8f2SDavid Hildenbrand 				 (freg_t *) fpu->fprs);
41889abc2a08SDavid Hildenbrand 	else
4189a7d4b8f2SDavid Hildenbrand 		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
41906a96bc7fSChristoffer Dall 
41916a96bc7fSChristoffer Dall out:
41926a96bc7fSChristoffer Dall 	vcpu_put(vcpu);
41936a96bc7fSChristoffer Dall 	return ret;
4194b0c632dbSHeiko Carstens }
4195b0c632dbSHeiko Carstens 
4196b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4197b0c632dbSHeiko Carstens {
41981393123eSChristoffer Dall 	vcpu_load(vcpu);
41991393123eSChristoffer Dall 
42009abc2a08SDavid Hildenbrand 	/* make sure we have the latest values */
42019abc2a08SDavid Hildenbrand 	save_fpu_regs();
42029abc2a08SDavid Hildenbrand 	if (MACHINE_HAS_VX)
4203a7d4b8f2SDavid Hildenbrand 		convert_vx_to_fp((freg_t *) fpu->fprs,
4204a7d4b8f2SDavid Hildenbrand 				 (__vector128 *) vcpu->run->s.regs.vrs);
42059abc2a08SDavid Hildenbrand 	else
4206a7d4b8f2SDavid Hildenbrand 		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
4207e1788bb9SChristian Borntraeger 	fpu->fpc = vcpu->run->s.regs.fpc;
42081393123eSChristoffer Dall 
42091393123eSChristoffer Dall 	vcpu_put(vcpu);
4210b0c632dbSHeiko Carstens 	return 0;
4211b0c632dbSHeiko Carstens }
4212b0c632dbSHeiko Carstens 
4213b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
4214b0c632dbSHeiko Carstens {
4215b0c632dbSHeiko Carstens 	int rc = 0;
4216b0c632dbSHeiko Carstens 
42177a42fdc2SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
4218b0c632dbSHeiko Carstens 		rc = -EBUSY;
4219d7b0b5ebSCarsten Otte 	else {
4220d7b0b5ebSCarsten Otte 		vcpu->run->psw_mask = psw.mask;
4221d7b0b5ebSCarsten Otte 		vcpu->run->psw_addr = psw.addr;
4222d7b0b5ebSCarsten Otte 	}
4223b0c632dbSHeiko Carstens 	return rc;
4224b0c632dbSHeiko Carstens }
4225b0c632dbSHeiko Carstens 
4226b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
4227b0c632dbSHeiko Carstens 				  struct kvm_translation *tr)
4228b0c632dbSHeiko Carstens {
4229b0c632dbSHeiko Carstens 	return -EINVAL; /* not implemented yet */
4230b0c632dbSHeiko Carstens }
4231b0c632dbSHeiko Carstens 
423227291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
423327291e21SDavid Hildenbrand 			      KVM_GUESTDBG_USE_HW_BP | \
423427291e21SDavid Hildenbrand 			      KVM_GUESTDBG_ENABLE)
423527291e21SDavid Hildenbrand 
4236d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
4237d0bfb940SJan Kiszka 					struct kvm_guest_debug *dbg)
4238b0c632dbSHeiko Carstens {
423927291e21SDavid Hildenbrand 	int rc = 0;
424027291e21SDavid Hildenbrand 
424166b56562SChristoffer Dall 	vcpu_load(vcpu);
424266b56562SChristoffer Dall 
424327291e21SDavid Hildenbrand 	vcpu->guest_debug = 0;
424427291e21SDavid Hildenbrand 	kvm_s390_clear_bp_data(vcpu);
424527291e21SDavid Hildenbrand 
424666b56562SChristoffer Dall 	if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
424766b56562SChristoffer Dall 		rc = -EINVAL;
424866b56562SChristoffer Dall 		goto out;
424966b56562SChristoffer Dall 	}
425066b56562SChristoffer Dall 	if (!sclp.has_gpere) {
425166b56562SChristoffer Dall 		rc = -EINVAL;
425266b56562SChristoffer Dall 		goto out;
425366b56562SChristoffer Dall 	}
425427291e21SDavid Hildenbrand 
425527291e21SDavid Hildenbrand 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
425627291e21SDavid Hildenbrand 		vcpu->guest_debug = dbg->control;
425727291e21SDavid Hildenbrand 		/* enforce guest PER */
4258ef8f4f49SDavid Hildenbrand 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
425927291e21SDavid Hildenbrand 
426027291e21SDavid Hildenbrand 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
426127291e21SDavid Hildenbrand 			rc = kvm_s390_import_bp_data(vcpu, dbg);
426227291e21SDavid Hildenbrand 	} else {
42639daecfc6SDavid Hildenbrand 		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
426427291e21SDavid Hildenbrand 		vcpu->arch.guestdbg.last_bp = 0;
426527291e21SDavid Hildenbrand 	}
426627291e21SDavid Hildenbrand 
426727291e21SDavid Hildenbrand 	if (rc) {
426827291e21SDavid Hildenbrand 		vcpu->guest_debug = 0;
426927291e21SDavid Hildenbrand 		kvm_s390_clear_bp_data(vcpu);
42709daecfc6SDavid Hildenbrand 		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
427127291e21SDavid Hildenbrand 	}
427227291e21SDavid Hildenbrand 
427366b56562SChristoffer Dall out:
427466b56562SChristoffer Dall 	vcpu_put(vcpu);
427527291e21SDavid Hildenbrand 	return rc;
4276b0c632dbSHeiko Carstens }
4277b0c632dbSHeiko Carstens 
427862d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
427962d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
428062d9f0dbSMarcelo Tosatti {
4281fd232561SChristoffer Dall 	int ret;
4282fd232561SChristoffer Dall 
4283fd232561SChristoffer Dall 	vcpu_load(vcpu);
4284fd232561SChristoffer Dall 
42856352e4d2SDavid Hildenbrand 	/* CHECK_STOP and LOAD are not supported yet */
4286fd232561SChristoffer Dall 	ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
42876352e4d2SDavid Hildenbrand 				      KVM_MP_STATE_OPERATING;
4288fd232561SChristoffer Dall 
4289fd232561SChristoffer Dall 	vcpu_put(vcpu);
4290fd232561SChristoffer Dall 	return ret;
429162d9f0dbSMarcelo Tosatti }
429262d9f0dbSMarcelo Tosatti 
429362d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
429462d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
429562d9f0dbSMarcelo Tosatti {
42966352e4d2SDavid Hildenbrand 	int rc = 0;
42976352e4d2SDavid Hildenbrand 
4298e83dff5eSChristoffer Dall 	vcpu_load(vcpu);
4299e83dff5eSChristoffer Dall 
43006352e4d2SDavid Hildenbrand 	/* user space knows about this interface - let it control the state */
430167cf68b6SEric Farman 	kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm);
43026352e4d2SDavid Hildenbrand 
43036352e4d2SDavid Hildenbrand 	switch (mp_state->mp_state) {
43046352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_STOPPED:
4305fe28c786SJanosch Frank 		rc = kvm_s390_vcpu_stop(vcpu);
43066352e4d2SDavid Hildenbrand 		break;
43076352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_OPERATING:
4308fe28c786SJanosch Frank 		rc = kvm_s390_vcpu_start(vcpu);
43096352e4d2SDavid Hildenbrand 		break;
43106352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_LOAD:
43117c36a3fcSJanosch Frank 		if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
43127c36a3fcSJanosch Frank 			rc = -ENXIO;
43137c36a3fcSJanosch Frank 			break;
43147c36a3fcSJanosch Frank 		}
43157c36a3fcSJanosch Frank 		rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
43167c36a3fcSJanosch Frank 		break;
43176352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_CHECK_STOP:
43183b684a42SJoe Perches 		fallthrough;	/* CHECK_STOP and LOAD are not supported yet */
43196352e4d2SDavid Hildenbrand 	default:
43206352e4d2SDavid Hildenbrand 		rc = -ENXIO;
43216352e4d2SDavid Hildenbrand 	}
43226352e4d2SDavid Hildenbrand 
4323e83dff5eSChristoffer Dall 	vcpu_put(vcpu);
43246352e4d2SDavid Hildenbrand 	return rc;
432562d9f0dbSMarcelo Tosatti }
432662d9f0dbSMarcelo Tosatti 
43278ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu)
43288ad35755SDavid Hildenbrand {
43298d5fb0dcSDavid Hildenbrand 	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
43308ad35755SDavid Hildenbrand }
43318ad35755SDavid Hildenbrand 
43322c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
43332c70fe44SChristian Borntraeger {
43348ad35755SDavid Hildenbrand retry:
43358e236546SChristian Borntraeger 	kvm_s390_vcpu_request_handled(vcpu);
43362fa6e1e1SRadim Krčmář 	if (!kvm_request_pending(vcpu))
4337586b7ccdSChristian Borntraeger 		return 0;
43382c70fe44SChristian Borntraeger 	/*
4339cc65c3a1SSean Christopherson 	 * If the guest prefix changed, re-arm the ipte notifier for the
4340b2d73b2aSMartin Schwidefsky 	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
43412c70fe44SChristian Borntraeger 	 * This ensures that the ipte instruction for this request has
43422c70fe44SChristian Borntraeger 	 * already finished. We might race against a second unmapper that
43432c70fe44SChristian Borntraeger 	 * wants to set the blocking bit. Lets just retry the request loop.
43442c70fe44SChristian Borntraeger 	 */
4345cc65c3a1SSean Christopherson 	if (kvm_check_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu)) {
43462c70fe44SChristian Borntraeger 		int rc;
4347b2d73b2aSMartin Schwidefsky 		rc = gmap_mprotect_notify(vcpu->arch.gmap,
4348fda902cbSMichael Mueller 					  kvm_s390_get_prefix(vcpu),
4349b2d73b2aSMartin Schwidefsky 					  PAGE_SIZE * 2, PROT_WRITE);
4350aca411a4SJulius Niedworok 		if (rc) {
4351cc65c3a1SSean Christopherson 			kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
43522c70fe44SChristian Borntraeger 			return rc;
4353aca411a4SJulius Niedworok 		}
43548ad35755SDavid Hildenbrand 		goto retry;
43552c70fe44SChristian Borntraeger 	}
43568ad35755SDavid Hildenbrand 
4357d3d692c8SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
4358d3d692c8SDavid Hildenbrand 		vcpu->arch.sie_block->ihcpu = 0xffff;
4359d3d692c8SDavid Hildenbrand 		goto retry;
4360d3d692c8SDavid Hildenbrand 	}
4361d3d692c8SDavid Hildenbrand 
43628ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
43638ad35755SDavid Hildenbrand 		if (!ibs_enabled(vcpu)) {
43648ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
4365ef8f4f49SDavid Hildenbrand 			kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
43668ad35755SDavid Hildenbrand 		}
43678ad35755SDavid Hildenbrand 		goto retry;
43688ad35755SDavid Hildenbrand 	}
43698ad35755SDavid Hildenbrand 
43708ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
43718ad35755SDavid Hildenbrand 		if (ibs_enabled(vcpu)) {
43728ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
43739daecfc6SDavid Hildenbrand 			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
43748ad35755SDavid Hildenbrand 		}
43758ad35755SDavid Hildenbrand 		goto retry;
43768ad35755SDavid Hildenbrand 	}
43778ad35755SDavid Hildenbrand 
43786502a34cSDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
43796502a34cSDavid Hildenbrand 		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
43806502a34cSDavid Hildenbrand 		goto retry;
43816502a34cSDavid Hildenbrand 	}
43826502a34cSDavid Hildenbrand 
4383190df4a2SClaudio Imbrenda 	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
4384190df4a2SClaudio Imbrenda 		/*
4385c9f0a2b8SJanosch Frank 		 * Disable CMM virtualization; we will emulate the ESSA
4386190df4a2SClaudio Imbrenda 		 * instruction manually, in order to provide additional
4387190df4a2SClaudio Imbrenda 		 * functionalities needed for live migration.
4388190df4a2SClaudio Imbrenda 		 */
4389190df4a2SClaudio Imbrenda 		vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
4390190df4a2SClaudio Imbrenda 		goto retry;
4391190df4a2SClaudio Imbrenda 	}
4392190df4a2SClaudio Imbrenda 
4393190df4a2SClaudio Imbrenda 	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
4394190df4a2SClaudio Imbrenda 		/*
4395c9f0a2b8SJanosch Frank 		 * Re-enable CMM virtualization if CMMA is available and
4396c9f0a2b8SJanosch Frank 		 * CMM has been used.
4397190df4a2SClaudio Imbrenda 		 */
4398190df4a2SClaudio Imbrenda 		if ((vcpu->kvm->arch.use_cmma) &&
4399c9f0a2b8SJanosch Frank 		    (vcpu->kvm->mm->context.uses_cmm))
4400190df4a2SClaudio Imbrenda 			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
4401190df4a2SClaudio Imbrenda 		goto retry;
4402190df4a2SClaudio Imbrenda 	}
4403190df4a2SClaudio Imbrenda 
44043194cdb7SDavid Hildenbrand 	/* we left the vsie handler, nothing to do, just clear the request */
44053194cdb7SDavid Hildenbrand 	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
44060759d068SDavid Hildenbrand 
44072c70fe44SChristian Borntraeger 	return 0;
44082c70fe44SChristian Borntraeger }
44092c70fe44SChristian Borntraeger 
4410c0573ba5SClaudio Imbrenda static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
44118fa1696eSCollin L. Walling {
44128fa1696eSCollin L. Walling 	struct kvm_vcpu *vcpu;
44132cfd7b73SHeiko Carstens 	union tod_clock clk;
441446808a4cSMarc Zyngier 	unsigned long i;
44158fa1696eSCollin L. Walling 
44168fa1696eSCollin L. Walling 	preempt_disable();
44178fa1696eSCollin L. Walling 
44182cfd7b73SHeiko Carstens 	store_tod_clock_ext(&clk);
44198fa1696eSCollin L. Walling 
44202cfd7b73SHeiko Carstens 	kvm->arch.epoch = gtod->tod - clk.tod;
44210e7def5fSDavid Hildenbrand 	kvm->arch.epdx = 0;
44220e7def5fSDavid Hildenbrand 	if (test_kvm_facility(kvm, 139)) {
44232cfd7b73SHeiko Carstens 		kvm->arch.epdx = gtod->epoch_idx - clk.ei;
44248fa1696eSCollin L. Walling 		if (kvm->arch.epoch > gtod->tod)
44258fa1696eSCollin L. Walling 			kvm->arch.epdx -= 1;
44260e7def5fSDavid Hildenbrand 	}
44278fa1696eSCollin L. Walling 
44288fa1696eSCollin L. Walling 	kvm_s390_vcpu_block_all(kvm);
44298fa1696eSCollin L. Walling 	kvm_for_each_vcpu(i, vcpu, kvm) {
44308fa1696eSCollin L. Walling 		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
44318fa1696eSCollin L. Walling 		vcpu->arch.sie_block->epdx  = kvm->arch.epdx;
44328fa1696eSCollin L. Walling 	}
44338fa1696eSCollin L. Walling 
44348fa1696eSCollin L. Walling 	kvm_s390_vcpu_unblock_all(kvm);
44358fa1696eSCollin L. Walling 	preempt_enable();
4436c0573ba5SClaudio Imbrenda }
4437c0573ba5SClaudio Imbrenda 
4438c0573ba5SClaudio Imbrenda int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4439c0573ba5SClaudio Imbrenda {
4440c0573ba5SClaudio Imbrenda 	if (!mutex_trylock(&kvm->lock))
4441c0573ba5SClaudio Imbrenda 		return 0;
4442c0573ba5SClaudio Imbrenda 	__kvm_s390_set_tod_clock(kvm, gtod);
4443c0573ba5SClaudio Imbrenda 	mutex_unlock(&kvm->lock);
4444c0573ba5SClaudio Imbrenda 	return 1;
4445c0573ba5SClaudio Imbrenda }
4446c0573ba5SClaudio Imbrenda 
4447fa576c58SThomas Huth /**
4448fa576c58SThomas Huth  * kvm_arch_fault_in_page - fault-in guest page if necessary
4449fa576c58SThomas Huth  * @vcpu: The corresponding virtual cpu
4450fa576c58SThomas Huth  * @gpa: Guest physical address
4451fa576c58SThomas Huth  * @writable: Whether the page should be writable or not
4452fa576c58SThomas Huth  *
4453fa576c58SThomas Huth  * Make sure that a guest page has been faulted-in on the host.
4454fa576c58SThomas Huth  *
4455fa576c58SThomas Huth  * Return: Zero on success, negative error code otherwise.
4456fa576c58SThomas Huth  */
4457fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
445824eb3a82SDominik Dingel {
4459527e30b4SMartin Schwidefsky 	return gmap_fault(vcpu->arch.gmap, gpa,
4460527e30b4SMartin Schwidefsky 			  writable ? FAULT_FLAG_WRITE : 0);
446124eb3a82SDominik Dingel }
446224eb3a82SDominik Dingel 
44633c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
44643c038e6bSDominik Dingel 				      unsigned long token)
44653c038e6bSDominik Dingel {
44663c038e6bSDominik Dingel 	struct kvm_s390_interrupt inti;
4467383d0b05SJens Freimann 	struct kvm_s390_irq irq;
44683c038e6bSDominik Dingel 
44693c038e6bSDominik Dingel 	if (start_token) {
4470383d0b05SJens Freimann 		irq.u.ext.ext_params2 = token;
4471383d0b05SJens Freimann 		irq.type = KVM_S390_INT_PFAULT_INIT;
4472383d0b05SJens Freimann 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
44733c038e6bSDominik Dingel 	} else {
44743c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_DONE;
4475383d0b05SJens Freimann 		inti.parm64 = token;
44763c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
44773c038e6bSDominik Dingel 	}
44783c038e6bSDominik Dingel }
44793c038e6bSDominik Dingel 
44802a18b7e7SVitaly Kuznetsov bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
44813c038e6bSDominik Dingel 				     struct kvm_async_pf *work)
44823c038e6bSDominik Dingel {
44833c038e6bSDominik Dingel 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
44843c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
44852a18b7e7SVitaly Kuznetsov 
44862a18b7e7SVitaly Kuznetsov 	return true;
44873c038e6bSDominik Dingel }
44883c038e6bSDominik Dingel 
44893c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
44903c038e6bSDominik Dingel 				 struct kvm_async_pf *work)
44913c038e6bSDominik Dingel {
44923c038e6bSDominik Dingel 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
44933c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
44943c038e6bSDominik Dingel }
44953c038e6bSDominik Dingel 
44963c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
44973c038e6bSDominik Dingel 			       struct kvm_async_pf *work)
44983c038e6bSDominik Dingel {
44993c038e6bSDominik Dingel 	/* s390 will always inject the page directly */
45003c038e6bSDominik Dingel }
45013c038e6bSDominik Dingel 
45027c0ade6cSVitaly Kuznetsov bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
45033c038e6bSDominik Dingel {
45043c038e6bSDominik Dingel 	/*
45053c038e6bSDominik Dingel 	 * s390 will always inject the page directly,
45063c038e6bSDominik Dingel 	 * but we still want check_async_completion to cleanup
45073c038e6bSDominik Dingel 	 */
45083c038e6bSDominik Dingel 	return true;
45093c038e6bSDominik Dingel }
45103c038e6bSDominik Dingel 
4511e8c22266SVitaly Kuznetsov static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
45123c038e6bSDominik Dingel {
45133c038e6bSDominik Dingel 	hva_t hva;
45143c038e6bSDominik Dingel 	struct kvm_arch_async_pf arch;
45153c038e6bSDominik Dingel 
45163c038e6bSDominik Dingel 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4517e8c22266SVitaly Kuznetsov 		return false;
45183c038e6bSDominik Dingel 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
45193c038e6bSDominik Dingel 	    vcpu->arch.pfault_compare)
4520e8c22266SVitaly Kuznetsov 		return false;
45213c038e6bSDominik Dingel 	if (psw_extint_disabled(vcpu))
4522e8c22266SVitaly Kuznetsov 		return false;
45239a022067SDavid Hildenbrand 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
4524e8c22266SVitaly Kuznetsov 		return false;
4525b9224cd7SDavid Hildenbrand 	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
4526e8c22266SVitaly Kuznetsov 		return false;
45273c038e6bSDominik Dingel 	if (!vcpu->arch.gmap->pfault_enabled)
4528e8c22266SVitaly Kuznetsov 		return false;
45293c038e6bSDominik Dingel 
453081480cc1SHeiko Carstens 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
453181480cc1SHeiko Carstens 	hva += current->thread.gmap_addr & ~PAGE_MASK;
453281480cc1SHeiko Carstens 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
4533e8c22266SVitaly Kuznetsov 		return false;
45343c038e6bSDominik Dingel 
4535e8c22266SVitaly Kuznetsov 	return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
45363c038e6bSDominik Dingel }
45373c038e6bSDominik Dingel 
45383fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu)
4539b0c632dbSHeiko Carstens {
45403fb4c40fSThomas Huth 	int rc, cpuflags;
4541e168bf8dSCarsten Otte 
45423c038e6bSDominik Dingel 	/*
45433c038e6bSDominik Dingel 	 * On s390 notifications for arriving pages will be delivered directly
45443c038e6bSDominik Dingel 	 * to the guest but the house keeping for completed pfaults is
45453c038e6bSDominik Dingel 	 * handled outside the worker.
45463c038e6bSDominik Dingel 	 */
45473c038e6bSDominik Dingel 	kvm_check_async_pf_completion(vcpu);
45483c038e6bSDominik Dingel 
45497ec7c8c7SChristian Borntraeger 	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
45507ec7c8c7SChristian Borntraeger 	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
4551b0c632dbSHeiko Carstens 
4552b0c632dbSHeiko Carstens 	if (need_resched())
4553b0c632dbSHeiko Carstens 		schedule();
4554b0c632dbSHeiko Carstens 
455579395031SJens Freimann 	if (!kvm_is_ucontrol(vcpu->kvm)) {
455679395031SJens Freimann 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
455779395031SJens Freimann 		if (rc)
455879395031SJens Freimann 			return rc;
455979395031SJens Freimann 	}
45600ff31867SCarsten Otte 
45612c70fe44SChristian Borntraeger 	rc = kvm_s390_handle_requests(vcpu);
45622c70fe44SChristian Borntraeger 	if (rc)
45632c70fe44SChristian Borntraeger 		return rc;
45642c70fe44SChristian Borntraeger 
456527291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu)) {
456627291e21SDavid Hildenbrand 		kvm_s390_backup_guest_per_regs(vcpu);
456727291e21SDavid Hildenbrand 		kvm_s390_patch_guest_per_regs(vcpu);
456827291e21SDavid Hildenbrand 	}
456927291e21SDavid Hildenbrand 
45704eeef242SSean Christopherson 	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
45719f30f621SMichael Mueller 
4572b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icptcode = 0;
45733fb4c40fSThomas Huth 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
45743fb4c40fSThomas Huth 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
45753fb4c40fSThomas Huth 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
45762b29a9fdSDominik Dingel 
45773fb4c40fSThomas Huth 	return 0;
45783fb4c40fSThomas Huth }
45793fb4c40fSThomas Huth 
4580492d8642SThomas Huth static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
4581492d8642SThomas Huth {
458256317920SDavid Hildenbrand 	struct kvm_s390_pgm_info pgm_info = {
458356317920SDavid Hildenbrand 		.code = PGM_ADDRESSING,
458456317920SDavid Hildenbrand 	};
458556317920SDavid Hildenbrand 	u8 opcode, ilen;
4586492d8642SThomas Huth 	int rc;
4587492d8642SThomas Huth 
4588492d8642SThomas Huth 	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
4589492d8642SThomas Huth 	trace_kvm_s390_sie_fault(vcpu);
4590492d8642SThomas Huth 
4591492d8642SThomas Huth 	/*
4592492d8642SThomas Huth 	 * We want to inject an addressing exception, which is defined as a
4593492d8642SThomas Huth 	 * suppressing or terminating exception. However, since we came here
4594492d8642SThomas Huth 	 * by a DAT access exception, the PSW still points to the faulting
4595492d8642SThomas Huth 	 * instruction since DAT exceptions are nullifying. So we've got
4596492d8642SThomas Huth 	 * to look up the current opcode to get the length of the instruction
4597492d8642SThomas Huth 	 * to be able to forward the PSW.
4598492d8642SThomas Huth 	 */
45993fa8cad7SDavid Hildenbrand 	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
460056317920SDavid Hildenbrand 	ilen = insn_length(opcode);
46019b0d721aSDavid Hildenbrand 	if (rc < 0) {
46029b0d721aSDavid Hildenbrand 		return rc;
46039b0d721aSDavid Hildenbrand 	} else if (rc) {
46049b0d721aSDavid Hildenbrand 		/* Instruction-Fetching Exceptions - we can't detect the ilen.
46059b0d721aSDavid Hildenbrand 		 * Forward by arbitrary ilc, injection will take care of
46069b0d721aSDavid Hildenbrand 		 * nullification if necessary.
46079b0d721aSDavid Hildenbrand 		 */
46089b0d721aSDavid Hildenbrand 		pgm_info = vcpu->arch.pgm;
46099b0d721aSDavid Hildenbrand 		ilen = 4;
46109b0d721aSDavid Hildenbrand 	}
461156317920SDavid Hildenbrand 	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
461256317920SDavid Hildenbrand 	kvm_s390_forward_psw(vcpu, ilen);
461356317920SDavid Hildenbrand 	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
4614492d8642SThomas Huth }
4615492d8642SThomas Huth 
46163fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
46173fb4c40fSThomas Huth {
46184d62fcc0SQingFeng Hao 	struct mcck_volatile_info *mcck_info;
46194d62fcc0SQingFeng Hao 	struct sie_page *sie_page;
46204d62fcc0SQingFeng Hao 
46212b29a9fdSDominik Dingel 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
46222b29a9fdSDominik Dingel 		   vcpu->arch.sie_block->icptcode);
46232b29a9fdSDominik Dingel 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
46242b29a9fdSDominik Dingel 
462527291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu))
462627291e21SDavid Hildenbrand 		kvm_s390_restore_guest_per_regs(vcpu);
462727291e21SDavid Hildenbrand 
46287ec7c8c7SChristian Borntraeger 	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
46297ec7c8c7SChristian Borntraeger 	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
463071f116bfSDavid Hildenbrand 
46314d62fcc0SQingFeng Hao 	if (exit_reason == -EINTR) {
46324d62fcc0SQingFeng Hao 		VCPU_EVENT(vcpu, 3, "%s", "machine check");
46334d62fcc0SQingFeng Hao 		sie_page = container_of(vcpu->arch.sie_block,
46344d62fcc0SQingFeng Hao 					struct sie_page, sie_block);
46354d62fcc0SQingFeng Hao 		mcck_info = &sie_page->mcck_info;
46364d62fcc0SQingFeng Hao 		kvm_s390_reinject_machine_check(vcpu, mcck_info);
46374d62fcc0SQingFeng Hao 		return 0;
46384d62fcc0SQingFeng Hao 	}
46394d62fcc0SQingFeng Hao 
464071f116bfSDavid Hildenbrand 	if (vcpu->arch.sie_block->icptcode > 0) {
464171f116bfSDavid Hildenbrand 		int rc = kvm_handle_sie_intercept(vcpu);
464271f116bfSDavid Hildenbrand 
464371f116bfSDavid Hildenbrand 		if (rc != -EOPNOTSUPP)
464471f116bfSDavid Hildenbrand 			return rc;
464571f116bfSDavid Hildenbrand 		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
464671f116bfSDavid Hildenbrand 		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
464771f116bfSDavid Hildenbrand 		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
464871f116bfSDavid Hildenbrand 		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
464971f116bfSDavid Hildenbrand 		return -EREMOTE;
465071f116bfSDavid Hildenbrand 	} else if (exit_reason != -EFAULT) {
465171f116bfSDavid Hildenbrand 		vcpu->stat.exit_null++;
465271f116bfSDavid Hildenbrand 		return 0;
4653210b1607SThomas Huth 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
4654210b1607SThomas Huth 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
4655210b1607SThomas Huth 		vcpu->run->s390_ucontrol.trans_exc_code =
4656210b1607SThomas Huth 						current->thread.gmap_addr;
4657210b1607SThomas Huth 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
465871f116bfSDavid Hildenbrand 		return -EREMOTE;
465924eb3a82SDominik Dingel 	} else if (current->thread.gmap_pfault) {
46603c038e6bSDominik Dingel 		trace_kvm_s390_major_guest_pfault(vcpu);
466124eb3a82SDominik Dingel 		current->thread.gmap_pfault = 0;
466271f116bfSDavid Hildenbrand 		if (kvm_arch_setup_async_pf(vcpu))
466371f116bfSDavid Hildenbrand 			return 0;
466450a05be4SChristian Borntraeger 		vcpu->stat.pfault_sync++;
466571f116bfSDavid Hildenbrand 		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
4666fa576c58SThomas Huth 	}
466771f116bfSDavid Hildenbrand 	return vcpu_post_run_fault_in_sie(vcpu);
46683fb4c40fSThomas Huth }
46693fb4c40fSThomas Huth 
46703adae0b4SJanosch Frank #define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
46713fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu)
46723fb4c40fSThomas Huth {
46733fb4c40fSThomas Huth 	int rc, exit_reason;
4674c8aac234SJanosch Frank 	struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
46753fb4c40fSThomas Huth 
4676800c1065SThomas Huth 	/*
4677800c1065SThomas Huth 	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
4678800c1065SThomas Huth 	 * ning the guest), so that memslots (and other stuff) are protected
4679800c1065SThomas Huth 	 */
46802031f287SSean Christopherson 	kvm_vcpu_srcu_read_lock(vcpu);
4681800c1065SThomas Huth 
4682a76ccff6SThomas Huth 	do {
46833fb4c40fSThomas Huth 		rc = vcpu_pre_run(vcpu);
46843fb4c40fSThomas Huth 		if (rc)
4685a76ccff6SThomas Huth 			break;
46863fb4c40fSThomas Huth 
46872031f287SSean Christopherson 		kvm_vcpu_srcu_read_unlock(vcpu);
46883fb4c40fSThomas Huth 		/*
4689a76ccff6SThomas Huth 		 * As PF_VCPU will be used in fault handler, between
4690a76ccff6SThomas Huth 		 * guest_enter and guest_exit should be no uaccess.
46913fb4c40fSThomas Huth 		 */
46920097d12eSChristian Borntraeger 		local_irq_disable();
46936edaa530SPaolo Bonzini 		guest_enter_irqoff();
4694db0758b2SDavid Hildenbrand 		__disable_cpu_timer_accounting(vcpu);
46950097d12eSChristian Borntraeger 		local_irq_enable();
4696c8aac234SJanosch Frank 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4697c8aac234SJanosch Frank 			memcpy(sie_page->pv_grregs,
4698c8aac234SJanosch Frank 			       vcpu->run->s.regs.gprs,
4699c8aac234SJanosch Frank 			       sizeof(sie_page->pv_grregs));
4700c8aac234SJanosch Frank 		}
470156e62a73SSven Schnelle 		if (test_cpu_flag(CIF_FPU))
470256e62a73SSven Schnelle 			load_fpu_regs();
4703a76ccff6SThomas Huth 		exit_reason = sie64a(vcpu->arch.sie_block,
4704a76ccff6SThomas Huth 				     vcpu->run->s.regs.gprs);
4705c8aac234SJanosch Frank 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4706c8aac234SJanosch Frank 			memcpy(vcpu->run->s.regs.gprs,
4707c8aac234SJanosch Frank 			       sie_page->pv_grregs,
4708c8aac234SJanosch Frank 			       sizeof(sie_page->pv_grregs));
47093adae0b4SJanosch Frank 			/*
47103adae0b4SJanosch Frank 			 * We're not allowed to inject interrupts on intercepts
47113adae0b4SJanosch Frank 			 * that leave the guest state in an "in-between" state
47123adae0b4SJanosch Frank 			 * where the next SIE entry will do a continuation.
47133adae0b4SJanosch Frank 			 * Fence interrupts in our "internal" PSW.
47143adae0b4SJanosch Frank 			 */
47153adae0b4SJanosch Frank 			if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
47163adae0b4SJanosch Frank 			    vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
47173adae0b4SJanosch Frank 				vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
47183adae0b4SJanosch Frank 			}
4719c8aac234SJanosch Frank 		}
47200097d12eSChristian Borntraeger 		local_irq_disable();
4721db0758b2SDavid Hildenbrand 		__enable_cpu_timer_accounting(vcpu);
47226edaa530SPaolo Bonzini 		guest_exit_irqoff();
47230097d12eSChristian Borntraeger 		local_irq_enable();
47242031f287SSean Christopherson 		kvm_vcpu_srcu_read_lock(vcpu);
47253fb4c40fSThomas Huth 
47263fb4c40fSThomas Huth 		rc = vcpu_post_run(vcpu, exit_reason);
472727291e21SDavid Hildenbrand 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
47283fb4c40fSThomas Huth 
47292031f287SSean Christopherson 	kvm_vcpu_srcu_read_unlock(vcpu);
4730e168bf8dSCarsten Otte 	return rc;
4731b0c632dbSHeiko Carstens }
4732b0c632dbSHeiko Carstens 
47332f0a83beSTianjia Zhang static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
4734b028ee3eSDavid Hildenbrand {
47352f0a83beSTianjia Zhang 	struct kvm_run *kvm_run = vcpu->run;
47364d5f2c04SChristian Borntraeger 	struct runtime_instr_cb *riccb;
47374e0b1ab7SFan Zhang 	struct gs_cb *gscb;
47384d5f2c04SChristian Borntraeger 
47394d5f2c04SChristian Borntraeger 	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
47404e0b1ab7SFan Zhang 	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
4741b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
4742b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
4743b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4744b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
4745b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
4746b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
4747b028ee3eSDavid Hildenbrand 	}
4748b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
4749b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
4750b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
4751b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
47529fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
47539fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
4754b028ee3eSDavid Hildenbrand 	}
475523a60f83SCollin Walling 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
475623a60f83SCollin Walling 		vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
475723a60f83SCollin Walling 		vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
47583fd8417fSCollin Walling 		VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc);
475923a60f83SCollin Walling 	}
476080cd8763SFan Zhang 	/*
476180cd8763SFan Zhang 	 * If userspace sets the riccb (e.g. after migration) to a valid state,
476280cd8763SFan Zhang 	 * we should enable RI here instead of doing the lazy enablement.
476380cd8763SFan Zhang 	 */
476480cd8763SFan Zhang 	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
47654d5f2c04SChristian Borntraeger 	    test_kvm_facility(vcpu->kvm, 64) &&
4766bb59c2daSAlice Frosi 	    riccb->v &&
47670c9d8683SDavid Hildenbrand 	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
47684d5f2c04SChristian Borntraeger 		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
47690c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
477080cd8763SFan Zhang 	}
47714e0b1ab7SFan Zhang 	/*
47724e0b1ab7SFan Zhang 	 * If userspace sets the gscb (e.g. after migration) to non-zero,
47734e0b1ab7SFan Zhang 	 * we should enable GS here instead of doing the lazy enablement.
47744e0b1ab7SFan Zhang 	 */
47754e0b1ab7SFan Zhang 	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
47764e0b1ab7SFan Zhang 	    test_kvm_facility(vcpu->kvm, 133) &&
47774e0b1ab7SFan Zhang 	    gscb->gssm &&
47784e0b1ab7SFan Zhang 	    !vcpu->arch.gs_enabled) {
47794e0b1ab7SFan Zhang 		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
47804e0b1ab7SFan Zhang 		vcpu->arch.sie_block->ecb |= ECB_GS;
47814e0b1ab7SFan Zhang 		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
47824e0b1ab7SFan Zhang 		vcpu->arch.gs_enabled = 1;
478380cd8763SFan Zhang 	}
478435b3fde6SChristian Borntraeger 	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
478535b3fde6SChristian Borntraeger 	    test_kvm_facility(vcpu->kvm, 82)) {
478635b3fde6SChristian Borntraeger 		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
478735b3fde6SChristian Borntraeger 		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
478835b3fde6SChristian Borntraeger 	}
47894e0b1ab7SFan Zhang 	if (MACHINE_HAS_GS) {
47904e0b1ab7SFan Zhang 		preempt_disable();
47914e0b1ab7SFan Zhang 		__ctl_set_bit(2, 4);
47924e0b1ab7SFan Zhang 		if (current->thread.gs_cb) {
47934e0b1ab7SFan Zhang 			vcpu->arch.host_gscb = current->thread.gs_cb;
47944e0b1ab7SFan Zhang 			save_gs_cb(vcpu->arch.host_gscb);
47954e0b1ab7SFan Zhang 		}
47964e0b1ab7SFan Zhang 		if (vcpu->arch.gs_enabled) {
47974e0b1ab7SFan Zhang 			current->thread.gs_cb = (struct gs_cb *)
47984e0b1ab7SFan Zhang 						&vcpu->run->s.regs.gscb;
47994e0b1ab7SFan Zhang 			restore_gs_cb(current->thread.gs_cb);
48004e0b1ab7SFan Zhang 		}
48014e0b1ab7SFan Zhang 		preempt_enable();
48024e0b1ab7SFan Zhang 	}
4803a3da7b4aSChristian Borntraeger 	/* SIE will load etoken directly from SDNX and therefore kvm_run */
4804811ea797SJanosch Frank }
4805811ea797SJanosch Frank 
48062f0a83beSTianjia Zhang static void sync_regs(struct kvm_vcpu *vcpu)
4807811ea797SJanosch Frank {
48082f0a83beSTianjia Zhang 	struct kvm_run *kvm_run = vcpu->run;
48092f0a83beSTianjia Zhang 
4810811ea797SJanosch Frank 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
4811811ea797SJanosch Frank 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
4812811ea797SJanosch Frank 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
4813811ea797SJanosch Frank 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
4814811ea797SJanosch Frank 		/* some control register changes require a tlb flush */
4815811ea797SJanosch Frank 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4816811ea797SJanosch Frank 	}
4817811ea797SJanosch Frank 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4818811ea797SJanosch Frank 		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
4819811ea797SJanosch Frank 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
4820811ea797SJanosch Frank 	}
4821811ea797SJanosch Frank 	save_access_regs(vcpu->arch.host_acrs);
4822811ea797SJanosch Frank 	restore_access_regs(vcpu->run->s.regs.acrs);
4823811ea797SJanosch Frank 	/* save host (userspace) fprs/vrs */
4824811ea797SJanosch Frank 	save_fpu_regs();
4825811ea797SJanosch Frank 	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
4826811ea797SJanosch Frank 	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
4827811ea797SJanosch Frank 	if (MACHINE_HAS_VX)
4828811ea797SJanosch Frank 		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
4829811ea797SJanosch Frank 	else
4830811ea797SJanosch Frank 		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
4831811ea797SJanosch Frank 	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
4832811ea797SJanosch Frank 	if (test_fp_ctl(current->thread.fpu.fpc))
4833811ea797SJanosch Frank 		/* User space provided an invalid FPC, let's clear it */
4834811ea797SJanosch Frank 		current->thread.fpu.fpc = 0;
4835811ea797SJanosch Frank 
4836811ea797SJanosch Frank 	/* Sync fmt2 only data */
4837811ea797SJanosch Frank 	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
48382f0a83beSTianjia Zhang 		sync_regs_fmt2(vcpu);
4839811ea797SJanosch Frank 	} else {
4840811ea797SJanosch Frank 		/*
4841811ea797SJanosch Frank 		 * In several places we have to modify our internal view to
4842811ea797SJanosch Frank 		 * not do things that are disallowed by the ultravisor. For
4843811ea797SJanosch Frank 		 * example we must not inject interrupts after specific exits
4844811ea797SJanosch Frank 		 * (e.g. 112 prefix page not secure). We do this by turning
4845811ea797SJanosch Frank 		 * off the machine check, external and I/O interrupt bits
4846811ea797SJanosch Frank 		 * of our PSW copy. To avoid getting validity intercepts, we
4847811ea797SJanosch Frank 		 * do only accept the condition code from userspace.
4848811ea797SJanosch Frank 		 */
4849811ea797SJanosch Frank 		vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
4850811ea797SJanosch Frank 		vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
4851811ea797SJanosch Frank 						   PSW_MASK_CC;
4852811ea797SJanosch Frank 	}
485380cd8763SFan Zhang 
4854b028ee3eSDavid Hildenbrand 	kvm_run->kvm_dirty_regs = 0;
4855b028ee3eSDavid Hildenbrand }
4856b028ee3eSDavid Hildenbrand 
48572f0a83beSTianjia Zhang static void store_regs_fmt2(struct kvm_vcpu *vcpu)
4858b028ee3eSDavid Hildenbrand {
48592f0a83beSTianjia Zhang 	struct kvm_run *kvm_run = vcpu->run;
48602f0a83beSTianjia Zhang 
4861b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
4862b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
4863b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
486435b3fde6SChristian Borntraeger 	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
486523a60f83SCollin Walling 	kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
48664e0b1ab7SFan Zhang 	if (MACHINE_HAS_GS) {
486744bada28SHeiko Carstens 		preempt_disable();
48684e0b1ab7SFan Zhang 		__ctl_set_bit(2, 4);
48694e0b1ab7SFan Zhang 		if (vcpu->arch.gs_enabled)
48704e0b1ab7SFan Zhang 			save_gs_cb(current->thread.gs_cb);
48714e0b1ab7SFan Zhang 		current->thread.gs_cb = vcpu->arch.host_gscb;
48724e0b1ab7SFan Zhang 		restore_gs_cb(vcpu->arch.host_gscb);
48734e0b1ab7SFan Zhang 		if (!vcpu->arch.host_gscb)
48744e0b1ab7SFan Zhang 			__ctl_clear_bit(2, 4);
48754e0b1ab7SFan Zhang 		vcpu->arch.host_gscb = NULL;
487644bada28SHeiko Carstens 		preempt_enable();
48774e0b1ab7SFan Zhang 	}
4878a3da7b4aSChristian Borntraeger 	/* SIE will save etoken directly into SDNX and therefore kvm_run */
4879b028ee3eSDavid Hildenbrand }
4880b028ee3eSDavid Hildenbrand 
48812f0a83beSTianjia Zhang static void store_regs(struct kvm_vcpu *vcpu)
4882811ea797SJanosch Frank {
48832f0a83beSTianjia Zhang 	struct kvm_run *kvm_run = vcpu->run;
48842f0a83beSTianjia Zhang 
4885811ea797SJanosch Frank 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
4886811ea797SJanosch Frank 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
4887811ea797SJanosch Frank 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
4888811ea797SJanosch Frank 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
4889811ea797SJanosch Frank 	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
4890811ea797SJanosch Frank 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
4891811ea797SJanosch Frank 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
4892811ea797SJanosch Frank 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
4893811ea797SJanosch Frank 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
4894811ea797SJanosch Frank 	save_access_regs(vcpu->run->s.regs.acrs);
4895811ea797SJanosch Frank 	restore_access_regs(vcpu->arch.host_acrs);
4896811ea797SJanosch Frank 	/* Save guest register state */
4897811ea797SJanosch Frank 	save_fpu_regs();
4898811ea797SJanosch Frank 	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
4899811ea797SJanosch Frank 	/* Restore will be done lazily at return */
4900811ea797SJanosch Frank 	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
4901811ea797SJanosch Frank 	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
4902811ea797SJanosch Frank 	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
49032f0a83beSTianjia Zhang 		store_regs_fmt2(vcpu);
4904811ea797SJanosch Frank }
4905811ea797SJanosch Frank 
49061b94f6f8STianjia Zhang int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
4907b0c632dbSHeiko Carstens {
49081b94f6f8STianjia Zhang 	struct kvm_run *kvm_run = vcpu->run;
49098f2abe6aSChristian Borntraeger 	int rc;
4910b0c632dbSHeiko Carstens 
49110460eb35SJanosch Frank 	/*
49120460eb35SJanosch Frank 	 * Running a VM while dumping always has the potential to
49130460eb35SJanosch Frank 	 * produce inconsistent dump data. But for PV vcpus a SIE
49140460eb35SJanosch Frank 	 * entry while dumping could also lead to a fatal validity
49150460eb35SJanosch Frank 	 * intercept which we absolutely want to avoid.
49160460eb35SJanosch Frank 	 */
49170460eb35SJanosch Frank 	if (vcpu->kvm->arch.pv.dumping)
49180460eb35SJanosch Frank 		return -EINVAL;
49190460eb35SJanosch Frank 
4920460df4c1SPaolo Bonzini 	if (kvm_run->immediate_exit)
4921460df4c1SPaolo Bonzini 		return -EINTR;
4922460df4c1SPaolo Bonzini 
4923200824f5SThomas Huth 	if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
4924200824f5SThomas Huth 	    kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
4925200824f5SThomas Huth 		return -EINVAL;
4926200824f5SThomas Huth 
4927accb757dSChristoffer Dall 	vcpu_load(vcpu);
4928accb757dSChristoffer Dall 
492927291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu)) {
493027291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
4931accb757dSChristoffer Dall 		rc = 0;
4932accb757dSChristoffer Dall 		goto out;
493327291e21SDavid Hildenbrand 	}
493427291e21SDavid Hildenbrand 
493520b7035cSJan H. Schönherr 	kvm_sigset_activate(vcpu);
4936b0c632dbSHeiko Carstens 
4937fe28c786SJanosch Frank 	/*
4938fe28c786SJanosch Frank 	 * no need to check the return value of vcpu_start as it can only have
4939fe28c786SJanosch Frank 	 * an error for protvirt, but protvirt means user cpu state
4940fe28c786SJanosch Frank 	 */
49416352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
49426852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
49436352e4d2SDavid Hildenbrand 	} else if (is_vcpu_stopped(vcpu)) {
4944ea2cdd27SDavid Hildenbrand 		pr_err_ratelimited("can't run stopped vcpu %d\n",
49456352e4d2SDavid Hildenbrand 				   vcpu->vcpu_id);
4946accb757dSChristoffer Dall 		rc = -EINVAL;
4947accb757dSChristoffer Dall 		goto out;
49486352e4d2SDavid Hildenbrand 	}
4949b0c632dbSHeiko Carstens 
49502f0a83beSTianjia Zhang 	sync_regs(vcpu);
4951db0758b2SDavid Hildenbrand 	enable_cpu_timer_accounting(vcpu);
4952d7b0b5ebSCarsten Otte 
4953dab4079dSHeiko Carstens 	might_fault();
4954e168bf8dSCarsten Otte 	rc = __vcpu_run(vcpu);
49559ace903dSChristian Ehrhardt 
4956b1d16c49SChristian Ehrhardt 	if (signal_pending(current) && !rc) {
4957b1d16c49SChristian Ehrhardt 		kvm_run->exit_reason = KVM_EXIT_INTR;
49588f2abe6aSChristian Borntraeger 		rc = -EINTR;
4959b1d16c49SChristian Ehrhardt 	}
49608f2abe6aSChristian Borntraeger 
496127291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu) && !rc)  {
496227291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
496327291e21SDavid Hildenbrand 		rc = 0;
496427291e21SDavid Hildenbrand 	}
496527291e21SDavid Hildenbrand 
49668f2abe6aSChristian Borntraeger 	if (rc == -EREMOTE) {
496771f116bfSDavid Hildenbrand 		/* userspace support is needed, kvm_run has been prepared */
49688f2abe6aSChristian Borntraeger 		rc = 0;
49698f2abe6aSChristian Borntraeger 	}
49708f2abe6aSChristian Borntraeger 
4971db0758b2SDavid Hildenbrand 	disable_cpu_timer_accounting(vcpu);
49722f0a83beSTianjia Zhang 	store_regs(vcpu);
4973d7b0b5ebSCarsten Otte 
497420b7035cSJan H. Schönherr 	kvm_sigset_deactivate(vcpu);
4975b0c632dbSHeiko Carstens 
4976b0c632dbSHeiko Carstens 	vcpu->stat.exit_userspace++;
4977accb757dSChristoffer Dall out:
4978accb757dSChristoffer Dall 	vcpu_put(vcpu);
49797e8e6ab4SHeiko Carstens 	return rc;
4980b0c632dbSHeiko Carstens }
4981b0c632dbSHeiko Carstens 
4982b0c632dbSHeiko Carstens /*
4983b0c632dbSHeiko Carstens  * store status at address
4984b0c632dbSHeiko Carstens  * we use have two special cases:
4985b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
4986b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
4987b0c632dbSHeiko Carstens  */
4988d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
4989b0c632dbSHeiko Carstens {
4990092670cdSCarsten Otte 	unsigned char archmode = 1;
49919abc2a08SDavid Hildenbrand 	freg_t fprs[NUM_FPRS];
4992fda902cbSMichael Mueller 	unsigned int px;
49934287f247SDavid Hildenbrand 	u64 clkcomp, cputm;
4994d0bce605SHeiko Carstens 	int rc;
4995b0c632dbSHeiko Carstens 
4996d9a3a09aSMartin Schwidefsky 	px = kvm_s390_get_prefix(vcpu);
4997d0bce605SHeiko Carstens 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
4998d0bce605SHeiko Carstens 		if (write_guest_abs(vcpu, 163, &archmode, 1))
4999b0c632dbSHeiko Carstens 			return -EFAULT;
5000d9a3a09aSMartin Schwidefsky 		gpa = 0;
5001d0bce605SHeiko Carstens 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
5002d0bce605SHeiko Carstens 		if (write_guest_real(vcpu, 163, &archmode, 1))
5003b0c632dbSHeiko Carstens 			return -EFAULT;
5004d9a3a09aSMartin Schwidefsky 		gpa = px;
5005d9a3a09aSMartin Schwidefsky 	} else
5006d9a3a09aSMartin Schwidefsky 		gpa -= __LC_FPREGS_SAVE_AREA;
50079abc2a08SDavid Hildenbrand 
50089abc2a08SDavid Hildenbrand 	/* manually convert vector registers if necessary */
50099abc2a08SDavid Hildenbrand 	if (MACHINE_HAS_VX) {
50109522b37fSDavid Hildenbrand 		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
5011d9a3a09aSMartin Schwidefsky 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
50129abc2a08SDavid Hildenbrand 				     fprs, 128);
50139abc2a08SDavid Hildenbrand 	} else {
50149abc2a08SDavid Hildenbrand 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
50156fd8e67dSDavid Hildenbrand 				     vcpu->run->s.regs.fprs, 128);
50169abc2a08SDavid Hildenbrand 	}
5017d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
5018d0bce605SHeiko Carstens 			      vcpu->run->s.regs.gprs, 128);
5019d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
5020d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gpsw, 16);
5021d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
5022fda902cbSMichael Mueller 			      &px, 4);
5023d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
50249abc2a08SDavid Hildenbrand 			      &vcpu->run->s.regs.fpc, 4);
5025d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
5026d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->todpr, 4);
50274287f247SDavid Hildenbrand 	cputm = kvm_s390_get_cpu_timer(vcpu);
5028d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
50294287f247SDavid Hildenbrand 			      &cputm, 8);
5030178bd789SThomas Huth 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
5031d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
5032d0bce605SHeiko Carstens 			      &clkcomp, 8);
5033d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
5034d0bce605SHeiko Carstens 			      &vcpu->run->s.regs.acrs, 64);
5035d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
5036d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gcr, 128);
5037d0bce605SHeiko Carstens 	return rc ? -EFAULT : 0;
5038b0c632dbSHeiko Carstens }
5039b0c632dbSHeiko Carstens 
5040e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
5041e879892cSThomas Huth {
5042e879892cSThomas Huth 	/*
5043e879892cSThomas Huth 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
504431d8b8d4SChristian Borntraeger 	 * switch in the run ioctl. Let's update our copies before we save
5045e879892cSThomas Huth 	 * it into the save area
5046e879892cSThomas Huth 	 */
5047d0164ee2SHendrik Brueckner 	save_fpu_regs();
50489abc2a08SDavid Hildenbrand 	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
5049e879892cSThomas Huth 	save_access_regs(vcpu->run->s.regs.acrs);
5050e879892cSThomas Huth 
5051e879892cSThomas Huth 	return kvm_s390_store_status_unloaded(vcpu, addr);
5052e879892cSThomas Huth }
5053e879892cSThomas Huth 
50548ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
50558ad35755SDavid Hildenbrand {
50568ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
50578e236546SChristian Borntraeger 	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
50588ad35755SDavid Hildenbrand }
50598ad35755SDavid Hildenbrand 
50608ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
50618ad35755SDavid Hildenbrand {
506246808a4cSMarc Zyngier 	unsigned long i;
50638ad35755SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
50648ad35755SDavid Hildenbrand 
50658ad35755SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
50668ad35755SDavid Hildenbrand 		__disable_ibs_on_vcpu(vcpu);
50678ad35755SDavid Hildenbrand 	}
50688ad35755SDavid Hildenbrand }
50698ad35755SDavid Hildenbrand 
50708ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
50718ad35755SDavid Hildenbrand {
507209a400e7SDavid Hildenbrand 	if (!sclp.has_ibs)
507309a400e7SDavid Hildenbrand 		return;
50748ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
50758e236546SChristian Borntraeger 	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
50768ad35755SDavid Hildenbrand }
50778ad35755SDavid Hildenbrand 
5078fe28c786SJanosch Frank int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
50796852d7b6SDavid Hildenbrand {
5080fe28c786SJanosch Frank 	int i, online_vcpus, r = 0, started_vcpus = 0;
50818ad35755SDavid Hildenbrand 
50828ad35755SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
5083fe28c786SJanosch Frank 		return 0;
50848ad35755SDavid Hildenbrand 
50856852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
50868ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
5087433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
50888ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
50898ad35755SDavid Hildenbrand 
5090fe28c786SJanosch Frank 	/* Let's tell the UV that we want to change into the operating state */
5091fe28c786SJanosch Frank 	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5092fe28c786SJanosch Frank 		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
5093fe28c786SJanosch Frank 		if (r) {
5094fe28c786SJanosch Frank 			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5095fe28c786SJanosch Frank 			return r;
5096fe28c786SJanosch Frank 		}
5097fe28c786SJanosch Frank 	}
5098fe28c786SJanosch Frank 
50998ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
5100113d10bcSMarc Zyngier 		if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i)))
51018ad35755SDavid Hildenbrand 			started_vcpus++;
51028ad35755SDavid Hildenbrand 	}
51038ad35755SDavid Hildenbrand 
51048ad35755SDavid Hildenbrand 	if (started_vcpus == 0) {
51058ad35755SDavid Hildenbrand 		/* we're the only active VCPU -> speed it up */
51068ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(vcpu);
51078ad35755SDavid Hildenbrand 	} else if (started_vcpus == 1) {
51088ad35755SDavid Hildenbrand 		/*
51098ad35755SDavid Hildenbrand 		 * As we are starting a second VCPU, we have to disable
51108ad35755SDavid Hildenbrand 		 * the IBS facility on all VCPUs to remove potentially
511138860756SBhaskar Chowdhury 		 * outstanding ENABLE requests.
51128ad35755SDavid Hildenbrand 		 */
51138ad35755SDavid Hildenbrand 		__disable_ibs_on_all_vcpus(vcpu->kvm);
51148ad35755SDavid Hildenbrand 	}
51158ad35755SDavid Hildenbrand 
51169daecfc6SDavid Hildenbrand 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
51178ad35755SDavid Hildenbrand 	/*
511872f21820SChristian Borntraeger 	 * The real PSW might have changed due to a RESTART interpreted by the
511972f21820SChristian Borntraeger 	 * ultravisor. We block all interrupts and let the next sie exit
512072f21820SChristian Borntraeger 	 * refresh our view.
512172f21820SChristian Borntraeger 	 */
512272f21820SChristian Borntraeger 	if (kvm_s390_pv_cpu_is_protected(vcpu))
512372f21820SChristian Borntraeger 		vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
512472f21820SChristian Borntraeger 	/*
51258ad35755SDavid Hildenbrand 	 * Another VCPU might have used IBS while we were offline.
51268ad35755SDavid Hildenbrand 	 * Let's play safe and flush the VCPU at startup.
51278ad35755SDavid Hildenbrand 	 */
5128d3d692c8SDavid Hildenbrand 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
5129433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5130fe28c786SJanosch Frank 	return 0;
51316852d7b6SDavid Hildenbrand }
51326852d7b6SDavid Hildenbrand 
5133fe28c786SJanosch Frank int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
51346852d7b6SDavid Hildenbrand {
5135fe28c786SJanosch Frank 	int i, online_vcpus, r = 0, started_vcpus = 0;
51368ad35755SDavid Hildenbrand 	struct kvm_vcpu *started_vcpu = NULL;
51378ad35755SDavid Hildenbrand 
51388ad35755SDavid Hildenbrand 	if (is_vcpu_stopped(vcpu))
5139fe28c786SJanosch Frank 		return 0;
51408ad35755SDavid Hildenbrand 
51416852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
51428ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
5143433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
51448ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
51458ad35755SDavid Hildenbrand 
5146fe28c786SJanosch Frank 	/* Let's tell the UV that we want to change into the stopped state */
5147fe28c786SJanosch Frank 	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5148fe28c786SJanosch Frank 		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
5149fe28c786SJanosch Frank 		if (r) {
5150fe28c786SJanosch Frank 			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5151fe28c786SJanosch Frank 			return r;
5152fe28c786SJanosch Frank 		}
5153fe28c786SJanosch Frank 	}
5154fe28c786SJanosch Frank 
5155812de046SEric Farman 	/*
5156812de046SEric Farman 	 * Set the VCPU to STOPPED and THEN clear the interrupt flag,
5157812de046SEric Farman 	 * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders
5158812de046SEric Farman 	 * have been fully processed. This will ensure that the VCPU
5159812de046SEric Farman 	 * is kept BUSY if another VCPU is inquiring with SIGP SENSE.
5160812de046SEric Farman 	 */
5161812de046SEric Farman 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
51626cddd432SDavid Hildenbrand 	kvm_s390_clear_stop_irq(vcpu);
516332f5ff63SDavid Hildenbrand 
51648ad35755SDavid Hildenbrand 	__disable_ibs_on_vcpu(vcpu);
51658ad35755SDavid Hildenbrand 
51668ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
5167113d10bcSMarc Zyngier 		struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i);
5168113d10bcSMarc Zyngier 
5169113d10bcSMarc Zyngier 		if (!is_vcpu_stopped(tmp)) {
51708ad35755SDavid Hildenbrand 			started_vcpus++;
5171113d10bcSMarc Zyngier 			started_vcpu = tmp;
51728ad35755SDavid Hildenbrand 		}
51738ad35755SDavid Hildenbrand 	}
51748ad35755SDavid Hildenbrand 
51758ad35755SDavid Hildenbrand 	if (started_vcpus == 1) {
51768ad35755SDavid Hildenbrand 		/*
51778ad35755SDavid Hildenbrand 		 * As we only have one VCPU left, we want to enable the
51788ad35755SDavid Hildenbrand 		 * IBS facility for that VCPU to speed it up.
51798ad35755SDavid Hildenbrand 		 */
51808ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(started_vcpu);
51818ad35755SDavid Hildenbrand 	}
51828ad35755SDavid Hildenbrand 
5183433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5184fe28c786SJanosch Frank 	return 0;
51856852d7b6SDavid Hildenbrand }
51866852d7b6SDavid Hildenbrand 
5187d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
5188d6712df9SCornelia Huck 				     struct kvm_enable_cap *cap)
5189d6712df9SCornelia Huck {
5190d6712df9SCornelia Huck 	int r;
5191d6712df9SCornelia Huck 
5192d6712df9SCornelia Huck 	if (cap->flags)
5193d6712df9SCornelia Huck 		return -EINVAL;
5194d6712df9SCornelia Huck 
5195d6712df9SCornelia Huck 	switch (cap->cap) {
5196fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
5197fa6b7fe9SCornelia Huck 		if (!vcpu->kvm->arch.css_support) {
5198fa6b7fe9SCornelia Huck 			vcpu->kvm->arch.css_support = 1;
5199c92ea7b9SChristian Borntraeger 			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
5200fa6b7fe9SCornelia Huck 			trace_kvm_s390_enable_css(vcpu->kvm);
5201fa6b7fe9SCornelia Huck 		}
5202fa6b7fe9SCornelia Huck 		r = 0;
5203fa6b7fe9SCornelia Huck 		break;
5204d6712df9SCornelia Huck 	default:
5205d6712df9SCornelia Huck 		r = -EINVAL;
5206d6712df9SCornelia Huck 		break;
5207d6712df9SCornelia Huck 	}
5208d6712df9SCornelia Huck 	return r;
5209d6712df9SCornelia Huck }
5210d6712df9SCornelia Huck 
52110e1234c0SJanis Schoetterl-Glausch static long kvm_s390_vcpu_sida_op(struct kvm_vcpu *vcpu,
521219e12277SJanosch Frank 				  struct kvm_s390_mem_op *mop)
521319e12277SJanosch Frank {
521419e12277SJanosch Frank 	void __user *uaddr = (void __user *)mop->buf;
5215b99f4512SNico Boehr 	void *sida_addr;
521619e12277SJanosch Frank 	int r = 0;
521719e12277SJanosch Frank 
521819e12277SJanosch Frank 	if (mop->flags || !mop->size)
521919e12277SJanosch Frank 		return -EINVAL;
522019e12277SJanosch Frank 	if (mop->size + mop->sida_offset < mop->size)
522119e12277SJanosch Frank 		return -EINVAL;
522219e12277SJanosch Frank 	if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
522319e12277SJanosch Frank 		return -E2BIG;
52242c212e1bSJanis Schoetterl-Glausch 	if (!kvm_s390_pv_cpu_is_protected(vcpu))
52252c212e1bSJanis Schoetterl-Glausch 		return -EINVAL;
522619e12277SJanosch Frank 
5227b99f4512SNico Boehr 	sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset;
5228b99f4512SNico Boehr 
522919e12277SJanosch Frank 	switch (mop->op) {
523019e12277SJanosch Frank 	case KVM_S390_MEMOP_SIDA_READ:
5231b99f4512SNico Boehr 		if (copy_to_user(uaddr, sida_addr, mop->size))
523219e12277SJanosch Frank 			r = -EFAULT;
523319e12277SJanosch Frank 
523419e12277SJanosch Frank 		break;
523519e12277SJanosch Frank 	case KVM_S390_MEMOP_SIDA_WRITE:
5236b99f4512SNico Boehr 		if (copy_from_user(sida_addr, uaddr, mop->size))
523719e12277SJanosch Frank 			r = -EFAULT;
523819e12277SJanosch Frank 		break;
523919e12277SJanosch Frank 	}
524019e12277SJanosch Frank 	return r;
524119e12277SJanosch Frank }
52420e1234c0SJanis Schoetterl-Glausch 
52430e1234c0SJanis Schoetterl-Glausch static long kvm_s390_vcpu_mem_op(struct kvm_vcpu *vcpu,
524441408c28SThomas Huth 				 struct kvm_s390_mem_op *mop)
524541408c28SThomas Huth {
524641408c28SThomas Huth 	void __user *uaddr = (void __user *)mop->buf;
524741408c28SThomas Huth 	void *tmpbuf = NULL;
524819e12277SJanosch Frank 	int r = 0;
524941408c28SThomas Huth 	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
5250e9e9feebSJanis Schoetterl-Glausch 				    | KVM_S390_MEMOP_F_CHECK_ONLY
5251e9e9feebSJanis Schoetterl-Glausch 				    | KVM_S390_MEMOP_F_SKEY_PROTECTION;
525241408c28SThomas Huth 
5253a13b03bbSThomas Huth 	if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
525441408c28SThomas Huth 		return -EINVAL;
525541408c28SThomas Huth 	if (mop->size > MEM_OP_MAX_SIZE)
525641408c28SThomas Huth 		return -E2BIG;
525719e12277SJanosch Frank 	if (kvm_s390_pv_cpu_is_protected(vcpu))
525819e12277SJanosch Frank 		return -EINVAL;
5259e9e9feebSJanis Schoetterl-Glausch 	if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) {
5260e9e9feebSJanis Schoetterl-Glausch 		if (access_key_invalid(mop->key))
5261e9e9feebSJanis Schoetterl-Glausch 			return -EINVAL;
5262e9e9feebSJanis Schoetterl-Glausch 	} else {
5263e9e9feebSJanis Schoetterl-Glausch 		mop->key = 0;
5264e9e9feebSJanis Schoetterl-Glausch 	}
526541408c28SThomas Huth 	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
526641408c28SThomas Huth 		tmpbuf = vmalloc(mop->size);
526741408c28SThomas Huth 		if (!tmpbuf)
526841408c28SThomas Huth 			return -ENOMEM;
526941408c28SThomas Huth 	}
527041408c28SThomas Huth 
527141408c28SThomas Huth 	switch (mop->op) {
527241408c28SThomas Huth 	case KVM_S390_MEMOP_LOGICAL_READ:
527341408c28SThomas Huth 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
5274e9e9feebSJanis Schoetterl-Glausch 			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size,
5275e9e9feebSJanis Schoetterl-Glausch 					    GACC_FETCH, mop->key);
527641408c28SThomas Huth 			break;
527741408c28SThomas Huth 		}
5278e9e9feebSJanis Schoetterl-Glausch 		r = read_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
5279e9e9feebSJanis Schoetterl-Glausch 					mop->size, mop->key);
528041408c28SThomas Huth 		if (r == 0) {
528141408c28SThomas Huth 			if (copy_to_user(uaddr, tmpbuf, mop->size))
528241408c28SThomas Huth 				r = -EFAULT;
528341408c28SThomas Huth 		}
528441408c28SThomas Huth 		break;
528541408c28SThomas Huth 	case KVM_S390_MEMOP_LOGICAL_WRITE:
528641408c28SThomas Huth 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
5287e9e9feebSJanis Schoetterl-Glausch 			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size,
5288e9e9feebSJanis Schoetterl-Glausch 					    GACC_STORE, mop->key);
528941408c28SThomas Huth 			break;
529041408c28SThomas Huth 		}
529141408c28SThomas Huth 		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
529241408c28SThomas Huth 			r = -EFAULT;
529341408c28SThomas Huth 			break;
529441408c28SThomas Huth 		}
5295e9e9feebSJanis Schoetterl-Glausch 		r = write_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
5296e9e9feebSJanis Schoetterl-Glausch 					 mop->size, mop->key);
529741408c28SThomas Huth 		break;
529841408c28SThomas Huth 	}
529941408c28SThomas Huth 
530041408c28SThomas Huth 	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
530141408c28SThomas Huth 		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
530241408c28SThomas Huth 
530341408c28SThomas Huth 	vfree(tmpbuf);
530441408c28SThomas Huth 	return r;
530541408c28SThomas Huth }
530641408c28SThomas Huth 
53070e1234c0SJanis Schoetterl-Glausch static long kvm_s390_vcpu_memsida_op(struct kvm_vcpu *vcpu,
530819e12277SJanosch Frank 				     struct kvm_s390_mem_op *mop)
530919e12277SJanosch Frank {
531019e12277SJanosch Frank 	int r, srcu_idx;
531119e12277SJanosch Frank 
531219e12277SJanosch Frank 	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
531319e12277SJanosch Frank 
531419e12277SJanosch Frank 	switch (mop->op) {
531519e12277SJanosch Frank 	case KVM_S390_MEMOP_LOGICAL_READ:
531619e12277SJanosch Frank 	case KVM_S390_MEMOP_LOGICAL_WRITE:
53170e1234c0SJanis Schoetterl-Glausch 		r = kvm_s390_vcpu_mem_op(vcpu, mop);
531819e12277SJanosch Frank 		break;
531919e12277SJanosch Frank 	case KVM_S390_MEMOP_SIDA_READ:
532019e12277SJanosch Frank 	case KVM_S390_MEMOP_SIDA_WRITE:
532119e12277SJanosch Frank 		/* we are locked against sida going away by the vcpu->mutex */
53220e1234c0SJanis Schoetterl-Glausch 		r = kvm_s390_vcpu_sida_op(vcpu, mop);
532319e12277SJanosch Frank 		break;
532419e12277SJanosch Frank 	default:
532519e12277SJanosch Frank 		r = -EINVAL;
532619e12277SJanosch Frank 	}
532719e12277SJanosch Frank 
532819e12277SJanosch Frank 	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
532919e12277SJanosch Frank 	return r;
533019e12277SJanosch Frank }
533119e12277SJanosch Frank 
53325cb0944cSPaolo Bonzini long kvm_arch_vcpu_async_ioctl(struct file *filp,
5333b0c632dbSHeiko Carstens 			       unsigned int ioctl, unsigned long arg)
5334b0c632dbSHeiko Carstens {
5335b0c632dbSHeiko Carstens 	struct kvm_vcpu *vcpu = filp->private_data;
5336b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
5337b0c632dbSHeiko Carstens 
533893736624SAvi Kivity 	switch (ioctl) {
533947b43c52SJens Freimann 	case KVM_S390_IRQ: {
534047b43c52SJens Freimann 		struct kvm_s390_irq s390irq;
534147b43c52SJens Freimann 
534247b43c52SJens Freimann 		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
53439b062471SChristoffer Dall 			return -EFAULT;
53449b062471SChristoffer Dall 		return kvm_s390_inject_vcpu(vcpu, &s390irq);
534547b43c52SJens Freimann 	}
534693736624SAvi Kivity 	case KVM_S390_INTERRUPT: {
5347ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
534853936b5bSThomas Huth 		struct kvm_s390_irq s390irq = {};
5349ba5c1e9bSCarsten Otte 
5350ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
53519b062471SChristoffer Dall 			return -EFAULT;
5352383d0b05SJens Freimann 		if (s390int_to_s390irq(&s390int, &s390irq))
5353383d0b05SJens Freimann 			return -EINVAL;
53549b062471SChristoffer Dall 		return kvm_s390_inject_vcpu(vcpu, &s390irq);
5355ba5c1e9bSCarsten Otte 	}
53569b062471SChristoffer Dall 	}
53575cb0944cSPaolo Bonzini 	return -ENOIOCTLCMD;
53585cb0944cSPaolo Bonzini }
53595cb0944cSPaolo Bonzini 
53608aba0958SJanosch Frank static int kvm_s390_handle_pv_vcpu_dump(struct kvm_vcpu *vcpu,
53618aba0958SJanosch Frank 					struct kvm_pv_cmd *cmd)
53628aba0958SJanosch Frank {
53638aba0958SJanosch Frank 	struct kvm_s390_pv_dmp dmp;
53648aba0958SJanosch Frank 	void *data;
53658aba0958SJanosch Frank 	int ret;
53668aba0958SJanosch Frank 
53678aba0958SJanosch Frank 	/* Dump initialization is a prerequisite */
53688aba0958SJanosch Frank 	if (!vcpu->kvm->arch.pv.dumping)
53698aba0958SJanosch Frank 		return -EINVAL;
53708aba0958SJanosch Frank 
53718aba0958SJanosch Frank 	if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp)))
53728aba0958SJanosch Frank 		return -EFAULT;
53738aba0958SJanosch Frank 
53748aba0958SJanosch Frank 	/* We only handle this subcmd right now */
53758aba0958SJanosch Frank 	if (dmp.subcmd != KVM_PV_DUMP_CPU)
53768aba0958SJanosch Frank 		return -EINVAL;
53778aba0958SJanosch Frank 
53788aba0958SJanosch Frank 	/* CPU dump length is the same as create cpu storage donation. */
53798aba0958SJanosch Frank 	if (dmp.buff_len != uv_info.guest_cpu_stor_len)
53808aba0958SJanosch Frank 		return -EINVAL;
53818aba0958SJanosch Frank 
53828aba0958SJanosch Frank 	data = kvzalloc(uv_info.guest_cpu_stor_len, GFP_KERNEL);
53838aba0958SJanosch Frank 	if (!data)
53848aba0958SJanosch Frank 		return -ENOMEM;
53858aba0958SJanosch Frank 
53868aba0958SJanosch Frank 	ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc);
53878aba0958SJanosch Frank 
53888aba0958SJanosch Frank 	VCPU_EVENT(vcpu, 3, "PROTVIRT DUMP CPU %d rc %x rrc %x",
53898aba0958SJanosch Frank 		   vcpu->vcpu_id, cmd->rc, cmd->rrc);
53908aba0958SJanosch Frank 
53918aba0958SJanosch Frank 	if (ret)
53928aba0958SJanosch Frank 		ret = -EINVAL;
53938aba0958SJanosch Frank 
53948aba0958SJanosch Frank 	/* On success copy over the dump data */
53958aba0958SJanosch Frank 	if (!ret && copy_to_user((__u8 __user *)dmp.buff_addr, data, uv_info.guest_cpu_stor_len))
53968aba0958SJanosch Frank 		ret = -EFAULT;
53978aba0958SJanosch Frank 
53988aba0958SJanosch Frank 	kvfree(data);
53998aba0958SJanosch Frank 	return ret;
54008aba0958SJanosch Frank }
54018aba0958SJanosch Frank 
54025cb0944cSPaolo Bonzini long kvm_arch_vcpu_ioctl(struct file *filp,
54035cb0944cSPaolo Bonzini 			 unsigned int ioctl, unsigned long arg)
54045cb0944cSPaolo Bonzini {
54055cb0944cSPaolo Bonzini 	struct kvm_vcpu *vcpu = filp->private_data;
54065cb0944cSPaolo Bonzini 	void __user *argp = (void __user *)arg;
54075cb0944cSPaolo Bonzini 	int idx;
54085cb0944cSPaolo Bonzini 	long r;
54098a8378faSJanosch Frank 	u16 rc, rrc;
54109b062471SChristoffer Dall 
54119b062471SChristoffer Dall 	vcpu_load(vcpu);
54129b062471SChristoffer Dall 
54139b062471SChristoffer Dall 	switch (ioctl) {
5414b0c632dbSHeiko Carstens 	case KVM_S390_STORE_STATUS:
5415800c1065SThomas Huth 		idx = srcu_read_lock(&vcpu->kvm->srcu);
541655680890SChristian Borntraeger 		r = kvm_s390_store_status_unloaded(vcpu, arg);
5417800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
5418bc923cc9SAvi Kivity 		break;
5419b0c632dbSHeiko Carstens 	case KVM_S390_SET_INITIAL_PSW: {
5420b0c632dbSHeiko Carstens 		psw_t psw;
5421b0c632dbSHeiko Carstens 
5422bc923cc9SAvi Kivity 		r = -EFAULT;
5423b0c632dbSHeiko Carstens 		if (copy_from_user(&psw, argp, sizeof(psw)))
5424bc923cc9SAvi Kivity 			break;
5425bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
5426bc923cc9SAvi Kivity 		break;
5427b0c632dbSHeiko Carstens 	}
54287de3f142SJanosch Frank 	case KVM_S390_CLEAR_RESET:
54297de3f142SJanosch Frank 		r = 0;
54307de3f142SJanosch Frank 		kvm_arch_vcpu_ioctl_clear_reset(vcpu);
54318a8378faSJanosch Frank 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
54328a8378faSJanosch Frank 			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
54338a8378faSJanosch Frank 					  UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
54348a8378faSJanosch Frank 			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
54358a8378faSJanosch Frank 				   rc, rrc);
54368a8378faSJanosch Frank 		}
54377de3f142SJanosch Frank 		break;
5438b0c632dbSHeiko Carstens 	case KVM_S390_INITIAL_RESET:
54397de3f142SJanosch Frank 		r = 0;
54407de3f142SJanosch Frank 		kvm_arch_vcpu_ioctl_initial_reset(vcpu);
54418a8378faSJanosch Frank 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
54428a8378faSJanosch Frank 			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
54438a8378faSJanosch Frank 					  UVC_CMD_CPU_RESET_INITIAL,
54448a8378faSJanosch Frank 					  &rc, &rrc);
54458a8378faSJanosch Frank 			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
54468a8378faSJanosch Frank 				   rc, rrc);
54478a8378faSJanosch Frank 		}
54487de3f142SJanosch Frank 		break;
54497de3f142SJanosch Frank 	case KVM_S390_NORMAL_RESET:
54507de3f142SJanosch Frank 		r = 0;
54517de3f142SJanosch Frank 		kvm_arch_vcpu_ioctl_normal_reset(vcpu);
54528a8378faSJanosch Frank 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
54538a8378faSJanosch Frank 			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
54548a8378faSJanosch Frank 					  UVC_CMD_CPU_RESET, &rc, &rrc);
54558a8378faSJanosch Frank 			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
54568a8378faSJanosch Frank 				   rc, rrc);
54578a8378faSJanosch Frank 		}
5458bc923cc9SAvi Kivity 		break;
545914eebd91SCarsten Otte 	case KVM_SET_ONE_REG:
546014eebd91SCarsten Otte 	case KVM_GET_ONE_REG: {
546114eebd91SCarsten Otte 		struct kvm_one_reg reg;
546268cf7b1fSJanosch Frank 		r = -EINVAL;
546368cf7b1fSJanosch Frank 		if (kvm_s390_pv_cpu_is_protected(vcpu))
546468cf7b1fSJanosch Frank 			break;
546514eebd91SCarsten Otte 		r = -EFAULT;
546614eebd91SCarsten Otte 		if (copy_from_user(&reg, argp, sizeof(reg)))
546714eebd91SCarsten Otte 			break;
546814eebd91SCarsten Otte 		if (ioctl == KVM_SET_ONE_REG)
546914eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
547014eebd91SCarsten Otte 		else
547114eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
547214eebd91SCarsten Otte 		break;
547314eebd91SCarsten Otte 	}
547427e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
547527e0393fSCarsten Otte 	case KVM_S390_UCAS_MAP: {
547627e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
547727e0393fSCarsten Otte 
547827e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
547927e0393fSCarsten Otte 			r = -EFAULT;
548027e0393fSCarsten Otte 			break;
548127e0393fSCarsten Otte 		}
548227e0393fSCarsten Otte 
548327e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
548427e0393fSCarsten Otte 			r = -EINVAL;
548527e0393fSCarsten Otte 			break;
548627e0393fSCarsten Otte 		}
548727e0393fSCarsten Otte 
548827e0393fSCarsten Otte 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
548927e0393fSCarsten Otte 				     ucasmap.vcpu_addr, ucasmap.length);
549027e0393fSCarsten Otte 		break;
549127e0393fSCarsten Otte 	}
549227e0393fSCarsten Otte 	case KVM_S390_UCAS_UNMAP: {
549327e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
549427e0393fSCarsten Otte 
549527e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
549627e0393fSCarsten Otte 			r = -EFAULT;
549727e0393fSCarsten Otte 			break;
549827e0393fSCarsten Otte 		}
549927e0393fSCarsten Otte 
550027e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
550127e0393fSCarsten Otte 			r = -EINVAL;
550227e0393fSCarsten Otte 			break;
550327e0393fSCarsten Otte 		}
550427e0393fSCarsten Otte 
550527e0393fSCarsten Otte 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
550627e0393fSCarsten Otte 			ucasmap.length);
550727e0393fSCarsten Otte 		break;
550827e0393fSCarsten Otte 	}
550927e0393fSCarsten Otte #endif
5510ccc7910fSCarsten Otte 	case KVM_S390_VCPU_FAULT: {
5511527e30b4SMartin Schwidefsky 		r = gmap_fault(vcpu->arch.gmap, arg, 0);
5512ccc7910fSCarsten Otte 		break;
5513ccc7910fSCarsten Otte 	}
5514d6712df9SCornelia Huck 	case KVM_ENABLE_CAP:
5515d6712df9SCornelia Huck 	{
5516d6712df9SCornelia Huck 		struct kvm_enable_cap cap;
5517d6712df9SCornelia Huck 		r = -EFAULT;
5518d6712df9SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
5519d6712df9SCornelia Huck 			break;
5520d6712df9SCornelia Huck 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
5521d6712df9SCornelia Huck 		break;
5522d6712df9SCornelia Huck 	}
552341408c28SThomas Huth 	case KVM_S390_MEM_OP: {
552441408c28SThomas Huth 		struct kvm_s390_mem_op mem_op;
552541408c28SThomas Huth 
552641408c28SThomas Huth 		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
55270e1234c0SJanis Schoetterl-Glausch 			r = kvm_s390_vcpu_memsida_op(vcpu, &mem_op);
552841408c28SThomas Huth 		else
552941408c28SThomas Huth 			r = -EFAULT;
553041408c28SThomas Huth 		break;
553141408c28SThomas Huth 	}
5532816c7667SJens Freimann 	case KVM_S390_SET_IRQ_STATE: {
5533816c7667SJens Freimann 		struct kvm_s390_irq_state irq_state;
5534816c7667SJens Freimann 
5535816c7667SJens Freimann 		r = -EFAULT;
5536816c7667SJens Freimann 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
5537816c7667SJens Freimann 			break;
5538816c7667SJens Freimann 		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
5539816c7667SJens Freimann 		    irq_state.len == 0 ||
5540816c7667SJens Freimann 		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
5541816c7667SJens Freimann 			r = -EINVAL;
5542816c7667SJens Freimann 			break;
5543816c7667SJens Freimann 		}
5544bb64da9aSChristian Borntraeger 		/* do not use irq_state.flags, it will break old QEMUs */
5545816c7667SJens Freimann 		r = kvm_s390_set_irq_state(vcpu,
5546816c7667SJens Freimann 					   (void __user *) irq_state.buf,
5547816c7667SJens Freimann 					   irq_state.len);
5548816c7667SJens Freimann 		break;
5549816c7667SJens Freimann 	}
5550816c7667SJens Freimann 	case KVM_S390_GET_IRQ_STATE: {
5551816c7667SJens Freimann 		struct kvm_s390_irq_state irq_state;
5552816c7667SJens Freimann 
5553816c7667SJens Freimann 		r = -EFAULT;
5554816c7667SJens Freimann 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
5555816c7667SJens Freimann 			break;
5556816c7667SJens Freimann 		if (irq_state.len == 0) {
5557816c7667SJens Freimann 			r = -EINVAL;
5558816c7667SJens Freimann 			break;
5559816c7667SJens Freimann 		}
5560bb64da9aSChristian Borntraeger 		/* do not use irq_state.flags, it will break old QEMUs */
5561816c7667SJens Freimann 		r = kvm_s390_get_irq_state(vcpu,
5562816c7667SJens Freimann 					   (__u8 __user *)  irq_state.buf,
5563816c7667SJens Freimann 					   irq_state.len);
5564816c7667SJens Freimann 		break;
5565816c7667SJens Freimann 	}
55668aba0958SJanosch Frank 	case KVM_S390_PV_CPU_COMMAND: {
55678aba0958SJanosch Frank 		struct kvm_pv_cmd cmd;
55688aba0958SJanosch Frank 
55698aba0958SJanosch Frank 		r = -EINVAL;
55708aba0958SJanosch Frank 		if (!is_prot_virt_host())
55718aba0958SJanosch Frank 			break;
55728aba0958SJanosch Frank 
55738aba0958SJanosch Frank 		r = -EFAULT;
55748aba0958SJanosch Frank 		if (copy_from_user(&cmd, argp, sizeof(cmd)))
55758aba0958SJanosch Frank 			break;
55768aba0958SJanosch Frank 
55778aba0958SJanosch Frank 		r = -EINVAL;
55788aba0958SJanosch Frank 		if (cmd.flags)
55798aba0958SJanosch Frank 			break;
55808aba0958SJanosch Frank 
55818aba0958SJanosch Frank 		/* We only handle this cmd right now */
55828aba0958SJanosch Frank 		if (cmd.cmd != KVM_PV_DUMP)
55838aba0958SJanosch Frank 			break;
55848aba0958SJanosch Frank 
55858aba0958SJanosch Frank 		r = kvm_s390_handle_pv_vcpu_dump(vcpu, &cmd);
55868aba0958SJanosch Frank 
55878aba0958SJanosch Frank 		/* Always copy over UV rc / rrc data */
55888aba0958SJanosch Frank 		if (copy_to_user((__u8 __user *)argp, &cmd.rc,
55898aba0958SJanosch Frank 				 sizeof(cmd.rc) + sizeof(cmd.rrc)))
55908aba0958SJanosch Frank 			r = -EFAULT;
55918aba0958SJanosch Frank 		break;
55928aba0958SJanosch Frank 	}
5593b0c632dbSHeiko Carstens 	default:
55943e6afcf1SCarsten Otte 		r = -ENOTTY;
5595b0c632dbSHeiko Carstens 	}
55969b062471SChristoffer Dall 
55979b062471SChristoffer Dall 	vcpu_put(vcpu);
5598bc923cc9SAvi Kivity 	return r;
5599b0c632dbSHeiko Carstens }
5600b0c632dbSHeiko Carstens 
56011499fa80SSouptick Joarder vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
56025b1c1493SCarsten Otte {
56035b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
56045b1c1493SCarsten Otte 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
56055b1c1493SCarsten Otte 		 && (kvm_is_ucontrol(vcpu->kvm))) {
56065b1c1493SCarsten Otte 		vmf->page = virt_to_page(vcpu->arch.sie_block);
56075b1c1493SCarsten Otte 		get_page(vmf->page);
56085b1c1493SCarsten Otte 		return 0;
56095b1c1493SCarsten Otte 	}
56105b1c1493SCarsten Otte #endif
56115b1c1493SCarsten Otte 	return VM_FAULT_SIGBUS;
56125b1c1493SCarsten Otte }
56135b1c1493SCarsten Otte 
5614d663b8a2SPaolo Bonzini bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
5615d663b8a2SPaolo Bonzini {
5616d663b8a2SPaolo Bonzini 	return true;
5617d663b8a2SPaolo Bonzini }
5618d663b8a2SPaolo Bonzini 
5619b0c632dbSHeiko Carstens /* Section: memory related */
5620f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
5621537a17b3SSean Christopherson 				   const struct kvm_memory_slot *old,
5622537a17b3SSean Christopherson 				   struct kvm_memory_slot *new,
56237b6195a9STakuya Yoshikawa 				   enum kvm_mr_change change)
5624b0c632dbSHeiko Carstens {
5625ec5c8697SSean Christopherson 	gpa_t size;
5626ec5c8697SSean Christopherson 
5627ec5c8697SSean Christopherson 	/* When we are protected, we should not change the memory slots */
5628ec5c8697SSean Christopherson 	if (kvm_s390_pv_get_handle(kvm))
5629ec5c8697SSean Christopherson 		return -EINVAL;
5630ec5c8697SSean Christopherson 
5631ec5c8697SSean Christopherson 	if (change == KVM_MR_DELETE || change == KVM_MR_FLAGS_ONLY)
5632ec5c8697SSean Christopherson 		return 0;
5633cf5b4869SSean Christopherson 
5634dd2887e7SNick Wang 	/* A few sanity checks. We can have memory slots which have to be
5635dd2887e7SNick Wang 	   located/ended at a segment boundary (1MB). The memory in userland is
5636dd2887e7SNick Wang 	   ok to be fragmented into various different vmas. It is okay to mmap()
5637dd2887e7SNick Wang 	   and munmap() stuff in this slot after doing this call at any time */
5638b0c632dbSHeiko Carstens 
5639cf5b4869SSean Christopherson 	if (new->userspace_addr & 0xffffful)
5640b0c632dbSHeiko Carstens 		return -EINVAL;
5641b0c632dbSHeiko Carstens 
5642ec5c8697SSean Christopherson 	size = new->npages * PAGE_SIZE;
5643cf5b4869SSean Christopherson 	if (size & 0xffffful)
5644b0c632dbSHeiko Carstens 		return -EINVAL;
5645b0c632dbSHeiko Carstens 
5646cf5b4869SSean Christopherson 	if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
5647a3a92c31SDominik Dingel 		return -EINVAL;
5648a3a92c31SDominik Dingel 
5649f7784b8eSMarcelo Tosatti 	return 0;
5650f7784b8eSMarcelo Tosatti }
5651f7784b8eSMarcelo Tosatti 
5652f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
56539d4c197cSSean Christopherson 				struct kvm_memory_slot *old,
5654f36f3f28SPaolo Bonzini 				const struct kvm_memory_slot *new,
56558482644aSTakuya Yoshikawa 				enum kvm_mr_change change)
5656f7784b8eSMarcelo Tosatti {
565719ec166cSChristian Borntraeger 	int rc = 0;
5658f7784b8eSMarcelo Tosatti 
565919ec166cSChristian Borntraeger 	switch (change) {
566019ec166cSChristian Borntraeger 	case KVM_MR_DELETE:
566119ec166cSChristian Borntraeger 		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
566219ec166cSChristian Borntraeger 					old->npages * PAGE_SIZE);
566319ec166cSChristian Borntraeger 		break;
566419ec166cSChristian Borntraeger 	case KVM_MR_MOVE:
566519ec166cSChristian Borntraeger 		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
566619ec166cSChristian Borntraeger 					old->npages * PAGE_SIZE);
566719ec166cSChristian Borntraeger 		if (rc)
566819ec166cSChristian Borntraeger 			break;
56693b684a42SJoe Perches 		fallthrough;
567019ec166cSChristian Borntraeger 	case KVM_MR_CREATE:
5671cf5b4869SSean Christopherson 		rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr,
5672cf5b4869SSean Christopherson 				      new->base_gfn * PAGE_SIZE,
5673cf5b4869SSean Christopherson 				      new->npages * PAGE_SIZE);
567419ec166cSChristian Borntraeger 		break;
567519ec166cSChristian Borntraeger 	case KVM_MR_FLAGS_ONLY:
567619ec166cSChristian Borntraeger 		break;
567719ec166cSChristian Borntraeger 	default:
567819ec166cSChristian Borntraeger 		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
567919ec166cSChristian Borntraeger 	}
5680598841caSCarsten Otte 	if (rc)
5681ea2cdd27SDavid Hildenbrand 		pr_warn("failed to commit memory region\n");
5682598841caSCarsten Otte 	return;
5683b0c632dbSHeiko Carstens }
5684b0c632dbSHeiko Carstens 
568560a37709SAlexander Yarygin static inline unsigned long nonhyp_mask(int i)
568660a37709SAlexander Yarygin {
568760a37709SAlexander Yarygin 	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
568860a37709SAlexander Yarygin 
568960a37709SAlexander Yarygin 	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
569060a37709SAlexander Yarygin }
569160a37709SAlexander Yarygin 
5692b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
5693b0c632dbSHeiko Carstens {
5694b8449265SSean Christopherson 	int i, r;
569560a37709SAlexander Yarygin 
569607197fd0SDavid Hildenbrand 	if (!sclp.has_sief2) {
56978d43d570SMichael Mueller 		pr_info("SIE is not available\n");
569807197fd0SDavid Hildenbrand 		return -ENODEV;
569907197fd0SDavid Hildenbrand 	}
570007197fd0SDavid Hildenbrand 
5701a4499382SJanosch Frank 	if (nested && hpage) {
57028d43d570SMichael Mueller 		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
5703a4499382SJanosch Frank 		return -EINVAL;
5704a4499382SJanosch Frank 	}
5705a4499382SJanosch Frank 
570660a37709SAlexander Yarygin 	for (i = 0; i < 16; i++)
5707c3b9e3e1SChristian Borntraeger 		kvm_s390_fac_base[i] |=
570817e89e13SSven Schnelle 			stfle_fac_list[i] & nonhyp_mask(i);
570960a37709SAlexander Yarygin 
5710b8449265SSean Christopherson 	r = __kvm_s390_init();
5711b8449265SSean Christopherson 	if (r)
5712b8449265SSean Christopherson 		return r;
5713b8449265SSean Christopherson 
5714*81a1cf9fSSean Christopherson 	r = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
5715b8449265SSean Christopherson 	if (r) {
5716b8449265SSean Christopherson 		__kvm_s390_exit();
5717b8449265SSean Christopherson 		return r;
5718b8449265SSean Christopherson 	}
5719b8449265SSean Christopherson 	return 0;
5720b0c632dbSHeiko Carstens }
5721b0c632dbSHeiko Carstens 
5722b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void)
5723b0c632dbSHeiko Carstens {
5724b0c632dbSHeiko Carstens 	kvm_exit();
5725b8449265SSean Christopherson 
5726b8449265SSean Christopherson 	__kvm_s390_exit();
5727b0c632dbSHeiko Carstens }
5728b0c632dbSHeiko Carstens 
5729b0c632dbSHeiko Carstens module_init(kvm_s390_init);
5730b0c632dbSHeiko Carstens module_exit(kvm_s390_exit);
5731566af940SCornelia Huck 
5732566af940SCornelia Huck /*
5733566af940SCornelia Huck  * Enable autoloading of the kvm module.
5734566af940SCornelia Huck  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
5735566af940SCornelia Huck  * since x86 takes a different approach.
5736566af940SCornelia Huck  */
5737566af940SCornelia Huck #include <linux/miscdevice.h>
5738566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR);
5739566af940SCornelia Huck MODULE_ALIAS("devname:kvm");
5740