// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/pgtable.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include <asm/uv.h>
#include <asm/fpu/api.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "pci.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
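/*
 * Upper bound on the buffer accepted by the KVM_S390_SET_IRQ_STATE ioctl:
 * one kvm_s390_irq entry per local interrupt source, plus one per possible
 * sending vcpu (emergency signals are tracked per sender).
 */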
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, inject_io),
	STATS_DESC_COUNTER(VM, inject_float_mchk),
	STATS_DESC_COUNTER(VM, inject_pfault_done),
	STATS_DESC_COUNTER(VM, inject_service_signal),
	STATS_DESC_COUNTER(VM, inject_virtio),
	STATS_DESC_COUNTER(VM, aen_forward)
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, exit_userspace),
	STATS_DESC_COUNTER(VCPU, exit_null),
	STATS_DESC_COUNTER(VCPU, exit_external_request),
	STATS_DESC_COUNTER(VCPU, exit_io_request),
	STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
	STATS_DESC_COUNTER(VCPU, exit_stop_request),
	STATS_DESC_COUNTER(VCPU, exit_validity),
	STATS_DESC_COUNTER(VCPU, exit_instruction),
	STATS_DESC_COUNTER(VCPU, exit_pei),
	STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
	STATS_DESC_COUNTER(VCPU, instruction_lctl),
	STATS_DESC_COUNTER(VCPU, instruction_lctlg),
	STATS_DESC_COUNTER(VCPU, instruction_stctl),
	STATS_DESC_COUNTER(VCPU, instruction_stctg),
	STATS_DESC_COUNTER(VCPU, exit_program_interruption),
	STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
	STATS_DESC_COUNTER(VCPU, exit_operation_exception),
	STATS_DESC_COUNTER(VCPU, deliver_ckc),
	STATS_DESC_COUNTER(VCPU, deliver_cputm),
	STATS_DESC_COUNTER(VCPU, deliver_external_call),
	STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
	STATS_DESC_COUNTER(VCPU, deliver_service_signal),
	STATS_DESC_COUNTER(VCPU, deliver_virtio),
	STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
	STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
	STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
	STATS_DESC_COUNTER(VCPU, deliver_program),
	STATS_DESC_COUNTER(VCPU, deliver_io),
	STATS_DESC_COUNTER(VCPU, deliver_machine_check),
	STATS_DESC_COUNTER(VCPU, exit_wait_state),
	STATS_DESC_COUNTER(VCPU, inject_ckc),
	STATS_DESC_COUNTER(VCPU, inject_cputm),
	STATS_DESC_COUNTER(VCPU, inject_external_call),
	STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
	STATS_DESC_COUNTER(VCPU, inject_mchk),
	STATS_DESC_COUNTER(VCPU, inject_pfault_init),
	STATS_DESC_COUNTER(VCPU, inject_program),
	STATS_DESC_COUNTER(VCPU, inject_restart),
	STATS_DESC_COUNTER(VCPU, inject_set_prefix),
	STATS_DESC_COUNTER(VCPU, inject_stop_signal),
	STATS_DESC_COUNTER(VCPU, instruction_epsw),
	STATS_DESC_COUNTER(VCPU, instruction_gs),
	STATS_DESC_COUNTER(VCPU, instruction_io_other),
	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
	STATS_DESC_COUNTER(VCPU, instruction_ptff),
	STATS_DESC_COUNTER(VCPU, instruction_sck),
	STATS_DESC_COUNTER(VCPU, instruction_sckpf),
	STATS_DESC_COUNTER(VCPU, instruction_stidp),
	STATS_DESC_COUNTER(VCPU, instruction_spx),
	STATS_DESC_COUNTER(VCPU, instruction_stpx),
	STATS_DESC_COUNTER(VCPU, instruction_stap),
	STATS_DESC_COUNTER(VCPU, instruction_iske),
	STATS_DESC_COUNTER(VCPU, instruction_ri),
	STATS_DESC_COUNTER(VCPU, instruction_rrbe),
	STATS_DESC_COUNTER(VCPU, instruction_sske),
	STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
	STATS_DESC_COUNTER(VCPU, instruction_stsi),
	STATS_DESC_COUNTER(VCPU, instruction_stfl),
	STATS_DESC_COUNTER(VCPU, instruction_tb),
	STATS_DESC_COUNTER(VCPU, instruction_tpi),
	STATS_DESC_COUNTER(VCPU, instruction_tprot),
	STATS_DESC_COUNTER(VCPU, instruction_tsch),
	STATS_DESC_COUNTER(VCPU, instruction_sie),
	STATS_DESC_COUNTER(VCPU, instruction_essa),
	STATS_DESC_COUNTER(VCPU, instruction_sthyi),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
	STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
	STATS_DESC_COUNTER(VCPU, diag_9c_forward),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
	STATS_DESC_COUNTER(VCPU, pfault_sync)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling.  >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");

/* maximum diag9c forwarding per second */
unsigned int diag9c_forwarding_hz;
module_param(diag9c_forwarding_hz, uint, 0644);
MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, it requires code changes, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		sizeof(stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}

/* forward declarations */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);
static int sca_switch_to_extended(struct kvm *kvm);

static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

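	/*
	 * With the multiple-epoch facility (ECD_MEF) the epoch is extended
	 * by the epoch index (epdx).  delta_idx is the sign extension of
	 * delta into that extension; the unsigned compare below detects a
	 * wrap of the 64-bit addition and adds the carry to epdx.
	 */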
	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void *opaque)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

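/*
 * PERFORM LOCKED OPERATION with the "test bit" (0x100) set in the function
 * code does not perform an operation; it only indicates via cc 0 whether
 * the requested function code is installed.
 */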
static inline int plo_test_bit(unsigned char nr)
{
	unsigned long function = (unsigned long)nr | 0x100;
	int cc;

	asm volatile(
		"	lgr	0,%[function]\n"
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: [function] "d" (function)
		: "cc", "0");
	return cc == 0;
}

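/*
 * Query the installed subfunctions of a 32-bit-opcode instruction
 * (e.g. SORTL, DFLTCC): GR0 = 0 selects the query function and GR1 points
 * to the parameter block that receives the installed-functions bit mask.
 */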
static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
	asm volatile(
		"	lghi	0,0\n"
		"	lgr	1,%[query]\n"
		/* Parameter registers are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		:
		: [query] "d" ((unsigned long)query), [opc] "i" (opcode)
		: "cc", "memory", "0", "1");
}

#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939

static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

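	/*
	 * The subfunction bits collected above are exposed to userspace
	 * via the KVM_S390_VM_CPU_MACHINE_SUBFUNC cpu model attribute.
	 */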
	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will cause
	 * pages to be detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf_uv)
		goto out;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
		goto out;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto out;
	}

	if (kvm_s390_pci_interp_allowed()) {
		rc = kvm_s390_pci_init();
		if (rc) {
			pr_err("Unable to allocate AIFT for PCI\n");
			goto out;
		}
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto out;

	return 0;

out:
	kvm_arch_exit();
	return rc;
}

void kvm_arch_exit(void)
{
	kvm_s390_gib_destroy();
	if (kvm_s390_pci_interp_allowed())
		kvm_s390_pci_exit();
	debug_unregister(kvm_s390_dbf);
	debug_unregister(kvm_s390_dbf_uv);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_S390_DIAG318:
	case KVM_CAP_S390_MEM_OP_EXTENSION:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		r = KVM_GUESTDBG_VALID_MASK;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		if (ext == KVM_CAP_NR_VCPUS)
			r = min_t(unsigned int, num_online_cpus(), r);
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	case KVM_CAP_S390_PROTECTED:
		r = is_prot_virt_host();
		break;
	case KVM_CAP_S390_PROTECTED_DUMP: {
		u64 pv_cmds_dump[] = {
			BIT_UVC_CMD_DUMP_INIT,
			BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE,
			BIT_UVC_CMD_DUMP_CPU,
			BIT_UVC_CMD_DUMP_COMPLETE,
		};
		int i;

		r = is_prot_virt_host();

		for (i = 0; i < ARRAY_SIZE(pv_cmds_dump); i++) {
			if (!test_bit_inv(pv_cmds_dump[i],
					  (unsigned long *)&uv_info.inst_calls_list)) {
				r = 0;
				break;
			}
		}
		break;
	}
	case KVM_CAP_S390_ZPCI_OP:
		r = kvm_s390_pci_interp_allowed();
		break;
	default:
		r = 0;
	}
	return r;
}

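/*
 * Sync the gmap's dirty state (collected in the PGSTEs) into the memslot's
 * dirty bitmap, one 1M segment (_PAGE_ENTRIES 4k pages) at a time.
 */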
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			if (test_facility(192)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 192);
				set_kvm_facility(kvm->arch.model.fac_list, 192);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			mmap_write_lock(kvm->mm);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			mmap_write_unlock(kvm->mm);
			/*
			 * We might have to create fake 4k page
			 * tables. To keep the hardware from working
			 * on stale PGSTEs, we emulate these
			 * instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

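/*
 * Illustrative only: userspace enables one of the capabilities handled above
 * with the generic KVM_ENABLE_CAP ioctl on the VM file descriptor, e.g.:
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_USER_SIGP };
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */
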
static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

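/*
 * Re-apply the crypto setup on all vcpus.  The vcpus are blocked (kicked out
 * of SIE) while doing so, and nested guests are forced to leave the VSIE
 * handler so that their shadow crycbs get rebuilt.
 */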
97120c922f0STony Krowiak void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
972a374e892STony Krowiak {
973a374e892STony Krowiak 	struct kvm_vcpu *vcpu;
97446808a4cSMarc Zyngier 	unsigned long i;
975a374e892STony Krowiak 
97620c922f0STony Krowiak 	kvm_s390_vcpu_block_all(kvm);
97720c922f0STony Krowiak 
9783194cdb7SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
97920c922f0STony Krowiak 		kvm_s390_vcpu_crypto_setup(vcpu);
9803194cdb7SDavid Hildenbrand 		/* recreate the shadow crycb by leaving the VSIE handler */
9813194cdb7SDavid Hildenbrand 		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
9823194cdb7SDavid Hildenbrand 	}
98320c922f0STony Krowiak 
98420c922f0STony Krowiak 	kvm_s390_vcpu_unblock_all(kvm);
98520c922f0STony Krowiak }
98620c922f0STony Krowiak 
98720c922f0STony Krowiak static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
98820c922f0STony Krowiak {
989a374e892STony Krowiak 	mutex_lock(&kvm->lock);
990a374e892STony Krowiak 	switch (attr->attr) {
991a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
9928e41bd54SChristian Borntraeger 		if (!test_kvm_facility(kvm, 76)) {
9938e41bd54SChristian Borntraeger 			mutex_unlock(&kvm->lock);
99437940fb0STony Krowiak 			return -EINVAL;
9958e41bd54SChristian Borntraeger 		}
996a374e892STony Krowiak 		get_random_bytes(
997a374e892STony Krowiak 			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
998a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
999a374e892STony Krowiak 		kvm->arch.crypto.aes_kw = 1;
1000c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
1001a374e892STony Krowiak 		break;
1002a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
10038e41bd54SChristian Borntraeger 		if (!test_kvm_facility(kvm, 76)) {
10048e41bd54SChristian Borntraeger 			mutex_unlock(&kvm->lock);
100537940fb0STony Krowiak 			return -EINVAL;
10068e41bd54SChristian Borntraeger 		}
1007a374e892STony Krowiak 		get_random_bytes(
1008a374e892STony Krowiak 			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1009a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
1010a374e892STony Krowiak 		kvm->arch.crypto.dea_kw = 1;
1011c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
1012a374e892STony Krowiak 		break;
1013a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
10148e41bd54SChristian Borntraeger 		if (!test_kvm_facility(kvm, 76)) {
10158e41bd54SChristian Borntraeger 			mutex_unlock(&kvm->lock);
101637940fb0STony Krowiak 			return -EINVAL;
10178e41bd54SChristian Borntraeger 		}
1018a374e892STony Krowiak 		kvm->arch.crypto.aes_kw = 0;
1019a374e892STony Krowiak 		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
1020a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1021c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
1022a374e892STony Krowiak 		break;
1023a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
10248e41bd54SChristian Borntraeger 		if (!test_kvm_facility(kvm, 76)) {
10258e41bd54SChristian Borntraeger 			mutex_unlock(&kvm->lock);
102637940fb0STony Krowiak 			return -EINVAL;
10278e41bd54SChristian Borntraeger 		}
1028a374e892STony Krowiak 		kvm->arch.crypto.dea_kw = 0;
1029a374e892STony Krowiak 		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
1030a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
1031c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
1032a374e892STony Krowiak 		break;
103337940fb0STony Krowiak 	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
103437940fb0STony Krowiak 		if (!ap_instructions_available()) {
103537940fb0STony Krowiak 			mutex_unlock(&kvm->lock);
103637940fb0STony Krowiak 			return -EOPNOTSUPP;
103737940fb0STony Krowiak 		}
103837940fb0STony Krowiak 		kvm->arch.crypto.apie = 1;
103937940fb0STony Krowiak 		break;
104037940fb0STony Krowiak 	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
104137940fb0STony Krowiak 		if (!ap_instructions_available()) {
104237940fb0STony Krowiak 			mutex_unlock(&kvm->lock);
104337940fb0STony Krowiak 			return -EOPNOTSUPP;
104437940fb0STony Krowiak 		}
104537940fb0STony Krowiak 		kvm->arch.crypto.apie = 0;
104637940fb0STony Krowiak 		break;
1047a374e892STony Krowiak 	default:
1048a374e892STony Krowiak 		mutex_unlock(&kvm->lock);
1049a374e892STony Krowiak 		return -ENXIO;
1050a374e892STony Krowiak 	}
1051a374e892STony Krowiak 
105220c922f0STony Krowiak 	kvm_s390_vcpu_crypto_reset_all(kvm);
1053a374e892STony Krowiak 	mutex_unlock(&kvm->lock);
1054a374e892STony Krowiak 	return 0;
1055a374e892STony Krowiak }
1056a374e892STony Krowiak 
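/*
 * Usage sketch (illustrative, not part of this file): the key-wrapping
 * attributes handled by kvm_s390_vm_set_crypto() are driven from userspace
 * with KVM_SET_DEVICE_ATTR on the VM file descriptor.  The helper below is
 * hypothetical; "vm_fd" is assumed to be an already created VM fd and error
 * handling is elided.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	static int enable_aes_keywrap(int vm_fd)
 *	{
 *		struct kvm_device_attr attr = {
 *			.group = KVM_S390_VM_CRYPTO,
 *			.attr  = KVM_S390_VM_CRYPTO_ENABLE_AES_KW,
 *		};
 *
 *		return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *	}
 */
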
10573f4bbb43SMatthew Rosato static void kvm_s390_vcpu_pci_setup(struct kvm_vcpu *vcpu)
10583f4bbb43SMatthew Rosato {
10593f4bbb43SMatthew Rosato 	/* Only set the ECB bits after guest requests zPCI interpretation */
10603f4bbb43SMatthew Rosato 	if (!vcpu->kvm->arch.use_zpci_interp)
10613f4bbb43SMatthew Rosato 		return;
10623f4bbb43SMatthew Rosato 
10633f4bbb43SMatthew Rosato 	vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI;
10643f4bbb43SMatthew Rosato 	vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI;
10653f4bbb43SMatthew Rosato }
10663f4bbb43SMatthew Rosato 
10673f4bbb43SMatthew Rosato void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm)
10683f4bbb43SMatthew Rosato {
10693f4bbb43SMatthew Rosato 	struct kvm_vcpu *vcpu;
10703f4bbb43SMatthew Rosato 	unsigned long i;
10713f4bbb43SMatthew Rosato 
10723f4bbb43SMatthew Rosato 	lockdep_assert_held(&kvm->lock);
10733f4bbb43SMatthew Rosato 
10743f4bbb43SMatthew Rosato 	if (!kvm_s390_pci_interp_allowed())
10753f4bbb43SMatthew Rosato 		return;
10763f4bbb43SMatthew Rosato 
10773f4bbb43SMatthew Rosato 	/*
10783f4bbb43SMatthew Rosato 	 * If host is configured for PCI and the necessary facilities are
10793f4bbb43SMatthew Rosato 	 * available, turn on interpretation for the life of this guest
10803f4bbb43SMatthew Rosato 	 */
10813f4bbb43SMatthew Rosato 	kvm->arch.use_zpci_interp = 1;
10823f4bbb43SMatthew Rosato 
10833f4bbb43SMatthew Rosato 	kvm_s390_vcpu_block_all(kvm);
10843f4bbb43SMatthew Rosato 
10853f4bbb43SMatthew Rosato 	kvm_for_each_vcpu(i, vcpu, kvm) {
10863f4bbb43SMatthew Rosato 		kvm_s390_vcpu_pci_setup(vcpu);
10873f4bbb43SMatthew Rosato 		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
10883f4bbb43SMatthew Rosato 	}
10893f4bbb43SMatthew Rosato 
10903f4bbb43SMatthew Rosato 	kvm_s390_vcpu_unblock_all(kvm);
10913f4bbb43SMatthew Rosato }
10923f4bbb43SMatthew Rosato 
1093190df4a2SClaudio Imbrenda static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
1094190df4a2SClaudio Imbrenda {
109546808a4cSMarc Zyngier 	unsigned long cx;
1096190df4a2SClaudio Imbrenda 	struct kvm_vcpu *vcpu;
1097190df4a2SClaudio Imbrenda 
1098190df4a2SClaudio Imbrenda 	kvm_for_each_vcpu(cx, vcpu, kvm)
1099190df4a2SClaudio Imbrenda 		kvm_s390_sync_request(req, vcpu);
1100190df4a2SClaudio Imbrenda }
1101190df4a2SClaudio Imbrenda 
1102190df4a2SClaudio Imbrenda /*
1103190df4a2SClaudio Imbrenda  * Must be called with kvm->srcu held to avoid races on memslots, and with
11041de1ea7eSChristian Borntraeger  * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
1105190df4a2SClaudio Imbrenda  */
1106190df4a2SClaudio Imbrenda static int kvm_s390_vm_start_migration(struct kvm *kvm)
1107190df4a2SClaudio Imbrenda {
1108190df4a2SClaudio Imbrenda 	struct kvm_memory_slot *ms;
1109190df4a2SClaudio Imbrenda 	struct kvm_memslots *slots;
1110afdad616SClaudio Imbrenda 	unsigned long ram_pages = 0;
1111a54d8066SMaciej S. Szmigiero 	int bkt;
1112190df4a2SClaudio Imbrenda 
1113190df4a2SClaudio Imbrenda 	/* migration mode already enabled */
1114afdad616SClaudio Imbrenda 	if (kvm->arch.migration_mode)
1115190df4a2SClaudio Imbrenda 		return 0;
1116190df4a2SClaudio Imbrenda 	slots = kvm_memslots(kvm);
1117a54d8066SMaciej S. Szmigiero 	if (!slots || kvm_memslots_empty(slots))
1118190df4a2SClaudio Imbrenda 		return -EINVAL;
1119190df4a2SClaudio Imbrenda 
1120afdad616SClaudio Imbrenda 	if (!kvm->arch.use_cmma) {
1121afdad616SClaudio Imbrenda 		kvm->arch.migration_mode = 1;
1122afdad616SClaudio Imbrenda 		return 0;
1123190df4a2SClaudio Imbrenda 	}
1124190df4a2SClaudio Imbrenda 	/* mark all the pages in active slots as dirty */
1125a54d8066SMaciej S. Szmigiero 	kvm_for_each_memslot(ms, bkt, slots) {
112613a17cc0SIgor Mammedov 		if (!ms->dirty_bitmap)
112713a17cc0SIgor Mammedov 			return -EINVAL;
1128afdad616SClaudio Imbrenda 		/*
1129afdad616SClaudio Imbrenda 		 * The second half of the bitmap is only used on x86,
1130afdad616SClaudio Imbrenda 		 * and would be wasted otherwise, so we put it to good
1131afdad616SClaudio Imbrenda 		 * use here to keep track of the state of the storage
1132afdad616SClaudio Imbrenda 		 * attributes.
1133afdad616SClaudio Imbrenda 		 */
1134afdad616SClaudio Imbrenda 		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
1135afdad616SClaudio Imbrenda 		ram_pages += ms->npages;
1136190df4a2SClaudio Imbrenda 	}
1137afdad616SClaudio Imbrenda 	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
1138afdad616SClaudio Imbrenda 	kvm->arch.migration_mode = 1;
1139190df4a2SClaudio Imbrenda 	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
1140190df4a2SClaudio Imbrenda 	return 0;
1141190df4a2SClaudio Imbrenda }
1142190df4a2SClaudio Imbrenda 
1143190df4a2SClaudio Imbrenda /*
11441de1ea7eSChristian Borntraeger  * Must be called with kvm->slots_lock to avoid races with ourselves and
1145190df4a2SClaudio Imbrenda  * kvm_s390_vm_start_migration.
1146190df4a2SClaudio Imbrenda  */
1147190df4a2SClaudio Imbrenda static int kvm_s390_vm_stop_migration(struct kvm *kvm)
1148190df4a2SClaudio Imbrenda {
1149190df4a2SClaudio Imbrenda 	/* migration mode already disabled */
1150afdad616SClaudio Imbrenda 	if (!kvm->arch.migration_mode)
1151190df4a2SClaudio Imbrenda 		return 0;
1152afdad616SClaudio Imbrenda 	kvm->arch.migration_mode = 0;
1153afdad616SClaudio Imbrenda 	if (kvm->arch.use_cmma)
1154190df4a2SClaudio Imbrenda 		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
1155190df4a2SClaudio Imbrenda 	return 0;
1156190df4a2SClaudio Imbrenda }
1157190df4a2SClaudio Imbrenda 
1158190df4a2SClaudio Imbrenda static int kvm_s390_vm_set_migration(struct kvm *kvm,
1159190df4a2SClaudio Imbrenda 				     struct kvm_device_attr *attr)
1160190df4a2SClaudio Imbrenda {
11611de1ea7eSChristian Borntraeger 	int res = -ENXIO;
1162190df4a2SClaudio Imbrenda 
11631de1ea7eSChristian Borntraeger 	mutex_lock(&kvm->slots_lock);
1164190df4a2SClaudio Imbrenda 	switch (attr->attr) {
1165190df4a2SClaudio Imbrenda 	case KVM_S390_VM_MIGRATION_START:
1166190df4a2SClaudio Imbrenda 		res = kvm_s390_vm_start_migration(kvm);
1167190df4a2SClaudio Imbrenda 		break;
1168190df4a2SClaudio Imbrenda 	case KVM_S390_VM_MIGRATION_STOP:
1169190df4a2SClaudio Imbrenda 		res = kvm_s390_vm_stop_migration(kvm);
1170190df4a2SClaudio Imbrenda 		break;
1171190df4a2SClaudio Imbrenda 	default:
1172190df4a2SClaudio Imbrenda 		break;
1173190df4a2SClaudio Imbrenda 	}
11741de1ea7eSChristian Borntraeger 	mutex_unlock(&kvm->slots_lock);
1175190df4a2SClaudio Imbrenda 
1176190df4a2SClaudio Imbrenda 	return res;
1177190df4a2SClaudio Imbrenda }
1178190df4a2SClaudio Imbrenda 
1179190df4a2SClaudio Imbrenda static int kvm_s390_vm_get_migration(struct kvm *kvm,
1180190df4a2SClaudio Imbrenda 				     struct kvm_device_attr *attr)
1181190df4a2SClaudio Imbrenda {
1182afdad616SClaudio Imbrenda 	u64 mig = kvm->arch.migration_mode;
1183190df4a2SClaudio Imbrenda 
1184190df4a2SClaudio Imbrenda 	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
1185190df4a2SClaudio Imbrenda 		return -ENXIO;
1186190df4a2SClaudio Imbrenda 
1187190df4a2SClaudio Imbrenda 	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
1188190df4a2SClaudio Imbrenda 		return -EFAULT;
1189190df4a2SClaudio Imbrenda 	return 0;
1190190df4a2SClaudio Imbrenda }
1191190df4a2SClaudio Imbrenda 
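/*
 * Usage sketch (illustrative, not part of this file): migration mode is
 * toggled and queried from userspace through the KVM_S390_VM_MIGRATION
 * attribute group.  The helpers below are hypothetical; "vm_fd" is assumed
 * to be a VM fd and error handling is elided.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	static int start_migration(int vm_fd)
 *	{
 *		struct kvm_device_attr attr = {
 *			.group = KVM_S390_VM_MIGRATION,
 *			.attr  = KVM_S390_VM_MIGRATION_START,
 *		};
 *
 *		return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *	}
 *
 *	static int migration_status(int vm_fd, __u64 *state)
 *	{
 *		struct kvm_device_attr attr = {
 *			.group = KVM_S390_VM_MIGRATION,
 *			.attr  = KVM_S390_VM_MIGRATION_STATUS,
 *			.addr  = (__u64)(unsigned long)state,
 *		};
 *
 *		return ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
 *	}
 */
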
11928fa1696eSCollin L. Walling static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
11938fa1696eSCollin L. Walling {
11948fa1696eSCollin L. Walling 	struct kvm_s390_vm_tod_clock gtod;
11958fa1696eSCollin L. Walling 
11968fa1696eSCollin L. Walling 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
11978fa1696eSCollin L. Walling 		return -EFAULT;
11988fa1696eSCollin L. Walling 
11990e7def5fSDavid Hildenbrand 	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
12008fa1696eSCollin L. Walling 		return -EINVAL;
12010e7def5fSDavid Hildenbrand 	kvm_s390_set_tod_clock(kvm, &gtod);
12028fa1696eSCollin L. Walling 
12038fa1696eSCollin L. Walling 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
12048fa1696eSCollin L. Walling 		gtod.epoch_idx, gtod.tod);
12058fa1696eSCollin L. Walling 
12068fa1696eSCollin L. Walling 	return 0;
12078fa1696eSCollin L. Walling }
12088fa1696eSCollin L. Walling 
120972f25020SJason J. Herne static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
121072f25020SJason J. Herne {
121172f25020SJason J. Herne 	u8 gtod_high;
121272f25020SJason J. Herne 
121372f25020SJason J. Herne 	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
121472f25020SJason J. Herne 					   sizeof(gtod_high)))
121572f25020SJason J. Herne 		return -EFAULT;
121672f25020SJason J. Herne 
121772f25020SJason J. Herne 	if (gtod_high != 0)
121872f25020SJason J. Herne 		return -EINVAL;
121958c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
122072f25020SJason J. Herne 
122172f25020SJason J. Herne 	return 0;
122272f25020SJason J. Herne }
122372f25020SJason J. Herne 
122472f25020SJason J. Herne static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
122572f25020SJason J. Herne {
12260e7def5fSDavid Hildenbrand 	struct kvm_s390_vm_tod_clock gtod = { 0 };
122772f25020SJason J. Herne 
12280e7def5fSDavid Hildenbrand 	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
12290e7def5fSDavid Hildenbrand 			   sizeof(gtod.tod)))
123072f25020SJason J. Herne 		return -EFAULT;
123172f25020SJason J. Herne 
12320e7def5fSDavid Hildenbrand 	kvm_s390_set_tod_clock(kvm, &gtod);
12330e7def5fSDavid Hildenbrand 	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
123472f25020SJason J. Herne 	return 0;
123572f25020SJason J. Herne }
123672f25020SJason J. Herne 
123772f25020SJason J. Herne static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
123872f25020SJason J. Herne {
123972f25020SJason J. Herne 	int ret;
124072f25020SJason J. Herne 
124172f25020SJason J. Herne 	if (attr->flags)
124272f25020SJason J. Herne 		return -EINVAL;
124372f25020SJason J. Herne 
124472f25020SJason J. Herne 	switch (attr->attr) {
12458fa1696eSCollin L. Walling 	case KVM_S390_VM_TOD_EXT:
12468fa1696eSCollin L. Walling 		ret = kvm_s390_set_tod_ext(kvm, attr);
12478fa1696eSCollin L. Walling 		break;
124872f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
124972f25020SJason J. Herne 		ret = kvm_s390_set_tod_high(kvm, attr);
125072f25020SJason J. Herne 		break;
125172f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
125272f25020SJason J. Herne 		ret = kvm_s390_set_tod_low(kvm, attr);
125372f25020SJason J. Herne 		break;
125472f25020SJason J. Herne 	default:
125572f25020SJason J. Herne 		ret = -ENXIO;
125672f25020SJason J. Herne 		break;
125772f25020SJason J. Herne 	}
125872f25020SJason J. Herne 	return ret;
125972f25020SJason J. Herne }
126072f25020SJason J. Herne 
126133d1b272SDavid Hildenbrand static void kvm_s390_get_tod_clock(struct kvm *kvm,
12628fa1696eSCollin L. Walling 				   struct kvm_s390_vm_tod_clock *gtod)
12638fa1696eSCollin L. Walling {
12642cfd7b73SHeiko Carstens 	union tod_clock clk;
12658fa1696eSCollin L. Walling 
12668fa1696eSCollin L. Walling 	preempt_disable();
12678fa1696eSCollin L. Walling 
12682cfd7b73SHeiko Carstens 	store_tod_clock_ext(&clk);
12698fa1696eSCollin L. Walling 
12702cfd7b73SHeiko Carstens 	gtod->tod = clk.tod + kvm->arch.epoch;
127133d1b272SDavid Hildenbrand 	gtod->epoch_idx = 0;
127233d1b272SDavid Hildenbrand 	if (test_kvm_facility(kvm, 139)) {
12732cfd7b73SHeiko Carstens 		gtod->epoch_idx = clk.ei + kvm->arch.epdx;
12742cfd7b73SHeiko Carstens 		if (gtod->tod < clk.tod)
12758fa1696eSCollin L. Walling 			gtod->epoch_idx += 1;
127633d1b272SDavid Hildenbrand 	}
12778fa1696eSCollin L. Walling 
12788fa1696eSCollin L. Walling 	preempt_enable();
12798fa1696eSCollin L. Walling }
12808fa1696eSCollin L. Walling 
12818fa1696eSCollin L. Walling static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
12828fa1696eSCollin L. Walling {
12838fa1696eSCollin L. Walling 	struct kvm_s390_vm_tod_clock gtod;
12848fa1696eSCollin L. Walling 
12858fa1696eSCollin L. Walling 	memset(&gtod, 0, sizeof(gtod));
128633d1b272SDavid Hildenbrand 	kvm_s390_get_tod_clock(kvm, &gtod);
12878fa1696eSCollin L. Walling 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
12888fa1696eSCollin L. Walling 		return -EFAULT;
12898fa1696eSCollin L. Walling 
12908fa1696eSCollin L. Walling 	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
12918fa1696eSCollin L. Walling 		gtod.epoch_idx, gtod.tod);
12928fa1696eSCollin L. Walling 	return 0;
12938fa1696eSCollin L. Walling }
12948fa1696eSCollin L. Walling 
129572f25020SJason J. Herne static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
129672f25020SJason J. Herne {
129772f25020SJason J. Herne 	u8 gtod_high = 0;
129872f25020SJason J. Herne 
129972f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod_high,
130072f25020SJason J. Herne 					 sizeof(gtod_high)))
130172f25020SJason J. Herne 		return -EFAULT;
130258c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
130372f25020SJason J. Herne 
130472f25020SJason J. Herne 	return 0;
130572f25020SJason J. Herne }
130672f25020SJason J. Herne 
130772f25020SJason J. Herne static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
130872f25020SJason J. Herne {
13095a3d883aSDavid Hildenbrand 	u64 gtod;
131072f25020SJason J. Herne 
131160417fccSDavid Hildenbrand 	gtod = kvm_s390_get_tod_clock_fast(kvm);
131272f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
131372f25020SJason J. Herne 		return -EFAULT;
131458c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
131572f25020SJason J. Herne 
131672f25020SJason J. Herne 	return 0;
131772f25020SJason J. Herne }
131872f25020SJason J. Herne 
131972f25020SJason J. Herne static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
132072f25020SJason J. Herne {
132172f25020SJason J. Herne 	int ret;
132272f25020SJason J. Herne 
132372f25020SJason J. Herne 	if (attr->flags)
132472f25020SJason J. Herne 		return -EINVAL;
132572f25020SJason J. Herne 
132672f25020SJason J. Herne 	switch (attr->attr) {
13278fa1696eSCollin L. Walling 	case KVM_S390_VM_TOD_EXT:
13288fa1696eSCollin L. Walling 		ret = kvm_s390_get_tod_ext(kvm, attr);
13298fa1696eSCollin L. Walling 		break;
133072f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
133172f25020SJason J. Herne 		ret = kvm_s390_get_tod_high(kvm, attr);
133272f25020SJason J. Herne 		break;
133372f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
133472f25020SJason J. Herne 		ret = kvm_s390_get_tod_low(kvm, attr);
133572f25020SJason J. Herne 		break;
133672f25020SJason J. Herne 	default:
133772f25020SJason J. Herne 		ret = -ENXIO;
133872f25020SJason J. Herne 		break;
133972f25020SJason J. Herne 	}
134072f25020SJason J. Herne 	return ret;
134172f25020SJason J. Herne }
134272f25020SJason J. Herne 
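/*
 * Usage sketch (illustrative, not part of this file): the guest TOD clock
 * handled by kvm_s390_get_tod() can be read back with KVM_GET_DEVICE_ATTR;
 * KVM_S390_VM_TOD_EXT returns the epoch index and TOD base together.  The
 * helper below is hypothetical; "vm_fd" is assumed to be a VM fd and error
 * handling is elided.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	static int get_guest_tod(int vm_fd, struct kvm_s390_vm_tod_clock *gtod)
 *	{
 *		struct kvm_device_attr attr = {
 *			.group = KVM_S390_VM_TOD,
 *			.attr  = KVM_S390_VM_TOD_EXT,
 *			.addr  = (__u64)(unsigned long)gtod,
 *		};
 *
 *		return ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
 *	}
 */
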
1343658b6edaSMichael Mueller static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1344658b6edaSMichael Mueller {
1345658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_processor *proc;
1346053dd230SDavid Hildenbrand 	u16 lowest_ibc, unblocked_ibc;
1347658b6edaSMichael Mueller 	int ret = 0;
1348658b6edaSMichael Mueller 
1349658b6edaSMichael Mueller 	mutex_lock(&kvm->lock);
1350a03825bbSPaolo Bonzini 	if (kvm->created_vcpus) {
1351658b6edaSMichael Mueller 		ret = -EBUSY;
1352658b6edaSMichael Mueller 		goto out;
1353658b6edaSMichael Mueller 	}
1354c4196218SChristian Borntraeger 	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
1355658b6edaSMichael Mueller 	if (!proc) {
1356658b6edaSMichael Mueller 		ret = -ENOMEM;
1357658b6edaSMichael Mueller 		goto out;
1358658b6edaSMichael Mueller 	}
1359658b6edaSMichael Mueller 	if (!copy_from_user(proc, (void __user *)attr->addr,
1360658b6edaSMichael Mueller 			    sizeof(*proc))) {
13619bb0ec09SDavid Hildenbrand 		kvm->arch.model.cpuid = proc->cpuid;
1362053dd230SDavid Hildenbrand 		lowest_ibc = sclp.ibc >> 16 & 0xfff;
1363053dd230SDavid Hildenbrand 		unblocked_ibc = sclp.ibc & 0xfff;
13640487c44dSDavid Hildenbrand 		if (lowest_ibc && proc->ibc) {
1365053dd230SDavid Hildenbrand 			if (proc->ibc > unblocked_ibc)
1366053dd230SDavid Hildenbrand 				kvm->arch.model.ibc = unblocked_ibc;
1367053dd230SDavid Hildenbrand 			else if (proc->ibc < lowest_ibc)
1368053dd230SDavid Hildenbrand 				kvm->arch.model.ibc = lowest_ibc;
1369053dd230SDavid Hildenbrand 			else
1370658b6edaSMichael Mueller 				kvm->arch.model.ibc = proc->ibc;
1371053dd230SDavid Hildenbrand 		}
1372c54f0d6aSDavid Hildenbrand 		memcpy(kvm->arch.model.fac_list, proc->fac_list,
1373658b6edaSMichael Mueller 		       S390_ARCH_FAC_LIST_SIZE_BYTE);
1374a8c39dd7SChristian Borntraeger 		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1375a8c39dd7SChristian Borntraeger 			 kvm->arch.model.ibc,
1376a8c39dd7SChristian Borntraeger 			 kvm->arch.model.cpuid);
1377a8c39dd7SChristian Borntraeger 		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1378a8c39dd7SChristian Borntraeger 			 kvm->arch.model.fac_list[0],
1379a8c39dd7SChristian Borntraeger 			 kvm->arch.model.fac_list[1],
1380a8c39dd7SChristian Borntraeger 			 kvm->arch.model.fac_list[2]);
1381658b6edaSMichael Mueller 	} else
1382658b6edaSMichael Mueller 		ret = -EFAULT;
1383658b6edaSMichael Mueller 	kfree(proc);
1384658b6edaSMichael Mueller out:
1385658b6edaSMichael Mueller 	mutex_unlock(&kvm->lock);
1386658b6edaSMichael Mueller 	return ret;
1387658b6edaSMichael Mueller }
1388658b6edaSMichael Mueller 
138915c9705fSDavid Hildenbrand static int kvm_s390_set_processor_feat(struct kvm *kvm,
139015c9705fSDavid Hildenbrand 				       struct kvm_device_attr *attr)
139115c9705fSDavid Hildenbrand {
139215c9705fSDavid Hildenbrand 	struct kvm_s390_vm_cpu_feat data;
139315c9705fSDavid Hildenbrand 
139415c9705fSDavid Hildenbrand 	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
139515c9705fSDavid Hildenbrand 		return -EFAULT;
139615c9705fSDavid Hildenbrand 	if (!bitmap_subset((unsigned long *) data.feat,
139715c9705fSDavid Hildenbrand 			   kvm_s390_available_cpu_feat,
139815c9705fSDavid Hildenbrand 			   KVM_S390_VM_CPU_FEAT_NR_BITS))
139915c9705fSDavid Hildenbrand 		return -EINVAL;
140015c9705fSDavid Hildenbrand 
140115c9705fSDavid Hildenbrand 	mutex_lock(&kvm->lock);
14022f8311c9SChristian Borntraeger 	if (kvm->created_vcpus) {
14032f8311c9SChristian Borntraeger 		mutex_unlock(&kvm->lock);
14042f8311c9SChristian Borntraeger 		return -EBUSY;
14052f8311c9SChristian Borntraeger 	}
1406da0f8e95SYury Norov 	bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
140715c9705fSDavid Hildenbrand 	mutex_unlock(&kvm->lock);
14082f8311c9SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
14092f8311c9SChristian Borntraeger 			 data.feat[0],
14102f8311c9SChristian Borntraeger 			 data.feat[1],
14112f8311c9SChristian Borntraeger 			 data.feat[2]);
14122f8311c9SChristian Borntraeger 	return 0;
141315c9705fSDavid Hildenbrand }
141415c9705fSDavid Hildenbrand 
14150a763c78SDavid Hildenbrand static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
14160a763c78SDavid Hildenbrand 					  struct kvm_device_attr *attr)
14170a763c78SDavid Hildenbrand {
1418346fa2f8SChristian Borntraeger 	mutex_lock(&kvm->lock);
1419346fa2f8SChristian Borntraeger 	if (kvm->created_vcpus) {
1420346fa2f8SChristian Borntraeger 		mutex_unlock(&kvm->lock);
1421346fa2f8SChristian Borntraeger 		return -EBUSY;
1422346fa2f8SChristian Borntraeger 	}
1423346fa2f8SChristian Borntraeger 
1424346fa2f8SChristian Borntraeger 	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
1425346fa2f8SChristian Borntraeger 			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
1426346fa2f8SChristian Borntraeger 		mutex_unlock(&kvm->lock);
1427346fa2f8SChristian Borntraeger 		return -EFAULT;
1428346fa2f8SChristian Borntraeger 	}
1429346fa2f8SChristian Borntraeger 	mutex_unlock(&kvm->lock);
1430346fa2f8SChristian Borntraeger 
143111ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
143211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
143311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
143411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
143511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
143611ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
143711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
143811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
143911ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
144011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
144111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
144211ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
144311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
144411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
144511ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KM     subfunc 0x%16.16lx.%16.16lx",
144611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
144711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
144811ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
144911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
145011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
145111ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
145211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
145311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
145411ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
145511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
145611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
145711ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
145811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
145911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
146011ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
146111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
146211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
146311ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
146411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
146511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
146611ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
146711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
146811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
146911ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
147011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
147111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
147211ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
147311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
147411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
147513209ad0SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
147613209ad0SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
147713209ad0SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1478173aec2dSChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1479173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1480173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1481173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1482173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
14834f45b90eSChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
14844f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
14854f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
14864f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
14874f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
148811ba5961SChristian Borntraeger 
1489346fa2f8SChristian Borntraeger 	return 0;
14900a763c78SDavid Hildenbrand }
14910a763c78SDavid Hildenbrand 
1492658b6edaSMichael Mueller static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1493658b6edaSMichael Mueller {
1494658b6edaSMichael Mueller 	int ret = -ENXIO;
1495658b6edaSMichael Mueller 
1496658b6edaSMichael Mueller 	switch (attr->attr) {
1497658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
1498658b6edaSMichael Mueller 		ret = kvm_s390_set_processor(kvm, attr);
1499658b6edaSMichael Mueller 		break;
150015c9705fSDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
150115c9705fSDavid Hildenbrand 		ret = kvm_s390_set_processor_feat(kvm, attr);
150215c9705fSDavid Hildenbrand 		break;
15030a763c78SDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
15040a763c78SDavid Hildenbrand 		ret = kvm_s390_set_processor_subfunc(kvm, attr);
15050a763c78SDavid Hildenbrand 		break;
1506658b6edaSMichael Mueller 	}
1507658b6edaSMichael Mueller 	return ret;
1508658b6edaSMichael Mueller }
1509658b6edaSMichael Mueller 
1510658b6edaSMichael Mueller static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1511658b6edaSMichael Mueller {
1512658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_processor *proc;
1513658b6edaSMichael Mueller 	int ret = 0;
1514658b6edaSMichael Mueller 
1515c4196218SChristian Borntraeger 	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
1516658b6edaSMichael Mueller 	if (!proc) {
1517658b6edaSMichael Mueller 		ret = -ENOMEM;
1518658b6edaSMichael Mueller 		goto out;
1519658b6edaSMichael Mueller 	}
15209bb0ec09SDavid Hildenbrand 	proc->cpuid = kvm->arch.model.cpuid;
1521658b6edaSMichael Mueller 	proc->ibc = kvm->arch.model.ibc;
1522c54f0d6aSDavid Hildenbrand 	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1523c54f0d6aSDavid Hildenbrand 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
1524a8c39dd7SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1525a8c39dd7SChristian Borntraeger 		 kvm->arch.model.ibc,
1526a8c39dd7SChristian Borntraeger 		 kvm->arch.model.cpuid);
1527a8c39dd7SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1528a8c39dd7SChristian Borntraeger 		 kvm->arch.model.fac_list[0],
1529a8c39dd7SChristian Borntraeger 		 kvm->arch.model.fac_list[1],
1530a8c39dd7SChristian Borntraeger 		 kvm->arch.model.fac_list[2]);
1531658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1532658b6edaSMichael Mueller 		ret = -EFAULT;
1533658b6edaSMichael Mueller 	kfree(proc);
1534658b6edaSMichael Mueller out:
1535658b6edaSMichael Mueller 	return ret;
1536658b6edaSMichael Mueller }
1537658b6edaSMichael Mueller 
1538658b6edaSMichael Mueller static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1539658b6edaSMichael Mueller {
1540658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_machine *mach;
1541658b6edaSMichael Mueller 	int ret = 0;
1542658b6edaSMichael Mueller 
1543c4196218SChristian Borntraeger 	mach = kzalloc(sizeof(*mach), GFP_KERNEL_ACCOUNT);
1544658b6edaSMichael Mueller 	if (!mach) {
1545658b6edaSMichael Mueller 		ret = -ENOMEM;
1546658b6edaSMichael Mueller 		goto out;
1547658b6edaSMichael Mueller 	}
1548658b6edaSMichael Mueller 	get_cpu_id((struct cpuid *) &mach->cpuid);
154937c5f6c8SDavid Hildenbrand 	mach->ibc = sclp.ibc;
1550c54f0d6aSDavid Hildenbrand 	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
1551981467c9SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
155217e89e13SSven Schnelle 	memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,
155317e89e13SSven Schnelle 	       sizeof(stfle_fac_list));
1554a8c39dd7SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host ibc:  0x%4.4x, host cpuid:  0x%16.16llx",
1555a8c39dd7SChristian Borntraeger 		 kvm->arch.model.ibc,
1556a8c39dd7SChristian Borntraeger 		 kvm->arch.model.cpuid);
1557a8c39dd7SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host facmask:  0x%16.16llx.%16.16llx.%16.16llx",
1558a8c39dd7SChristian Borntraeger 		 mach->fac_mask[0],
1559a8c39dd7SChristian Borntraeger 		 mach->fac_mask[1],
1560a8c39dd7SChristian Borntraeger 		 mach->fac_mask[2]);
1561a8c39dd7SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host faclist:  0x%16.16llx.%16.16llx.%16.16llx",
1562a8c39dd7SChristian Borntraeger 		 mach->fac_list[0],
1563a8c39dd7SChristian Borntraeger 		 mach->fac_list[1],
1564a8c39dd7SChristian Borntraeger 		 mach->fac_list[2]);
1565658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1566658b6edaSMichael Mueller 		ret = -EFAULT;
1567658b6edaSMichael Mueller 	kfree(mach);
1568658b6edaSMichael Mueller out:
1569658b6edaSMichael Mueller 	return ret;
1570658b6edaSMichael Mueller }
1571658b6edaSMichael Mueller 
157215c9705fSDavid Hildenbrand static int kvm_s390_get_processor_feat(struct kvm *kvm,
157315c9705fSDavid Hildenbrand 				       struct kvm_device_attr *attr)
157415c9705fSDavid Hildenbrand {
157515c9705fSDavid Hildenbrand 	struct kvm_s390_vm_cpu_feat data;
157615c9705fSDavid Hildenbrand 
1577da0f8e95SYury Norov 	bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
157815c9705fSDavid Hildenbrand 	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
157915c9705fSDavid Hildenbrand 		return -EFAULT;
15802f8311c9SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
15812f8311c9SChristian Borntraeger 			 data.feat[0],
15822f8311c9SChristian Borntraeger 			 data.feat[1],
15832f8311c9SChristian Borntraeger 			 data.feat[2]);
158415c9705fSDavid Hildenbrand 	return 0;
158515c9705fSDavid Hildenbrand }
158615c9705fSDavid Hildenbrand 
158715c9705fSDavid Hildenbrand static int kvm_s390_get_machine_feat(struct kvm *kvm,
158815c9705fSDavid Hildenbrand 				     struct kvm_device_attr *attr)
158915c9705fSDavid Hildenbrand {
159015c9705fSDavid Hildenbrand 	struct kvm_s390_vm_cpu_feat data;
159115c9705fSDavid Hildenbrand 
1592da0f8e95SYury Norov 	bitmap_to_arr64(data.feat, kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
159315c9705fSDavid Hildenbrand 	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
159415c9705fSDavid Hildenbrand 		return -EFAULT;
15952f8311c9SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host feat:  0x%16.16llx.0x%16.16llx.0x%16.16llx",
15962f8311c9SChristian Borntraeger 			 data.feat[0],
15972f8311c9SChristian Borntraeger 			 data.feat[1],
15982f8311c9SChristian Borntraeger 			 data.feat[2]);
159915c9705fSDavid Hildenbrand 	return 0;
160015c9705fSDavid Hildenbrand }
160115c9705fSDavid Hildenbrand 
16020a763c78SDavid Hildenbrand static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
16030a763c78SDavid Hildenbrand 					  struct kvm_device_attr *attr)
16040a763c78SDavid Hildenbrand {
1605346fa2f8SChristian Borntraeger 	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
1606346fa2f8SChristian Borntraeger 	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
1607346fa2f8SChristian Borntraeger 		return -EFAULT;
1608346fa2f8SChristian Borntraeger 
160911ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
161011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
161111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
161211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
161311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
161411ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
161511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
161611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
161711ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
161811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
161911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
162011ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
162111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
162211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
162311ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KM     subfunc 0x%16.16lx.%16.16lx",
162411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
162511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
162611ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
162711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
162811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
162911ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
163011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
163111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
163211ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
163311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
163411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
163511ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
163611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
163711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
163811ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
163911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
164011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
164111ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
164211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
164311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
164411ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
164511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
164611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
164711ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
164811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
164911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
165011ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
165111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
165211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
165313209ad0SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
165413209ad0SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
165513209ad0SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1656173aec2dSChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1657173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1658173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1659173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1660173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
16614f45b90eSChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
16624f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
16634f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
16644f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
16654f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
166611ba5961SChristian Borntraeger 
1667346fa2f8SChristian Borntraeger 	return 0;
16680a763c78SDavid Hildenbrand }
16690a763c78SDavid Hildenbrand 
16700a763c78SDavid Hildenbrand static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
16710a763c78SDavid Hildenbrand 					struct kvm_device_attr *attr)
16720a763c78SDavid Hildenbrand {
16730a763c78SDavid Hildenbrand 	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
16740a763c78SDavid Hildenbrand 	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
16750a763c78SDavid Hildenbrand 		return -EFAULT;
167611ba5961SChristian Borntraeger 
167711ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
167811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
167911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
168011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
168111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
168211ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  PTFF   subfunc 0x%16.16lx.%16.16lx",
168311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
168411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
168511ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KMAC   subfunc 0x%16.16lx.%16.16lx",
168611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
168711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
168811ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KMC    subfunc 0x%16.16lx.%16.16lx",
168911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
169011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
169111ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KM     subfunc 0x%16.16lx.%16.16lx",
169211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
169311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
169411ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KIMD   subfunc 0x%16.16lx.%16.16lx",
169511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
169611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
169711ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KLMD   subfunc 0x%16.16lx.%16.16lx",
169811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
169911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
170011ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  PCKMO  subfunc 0x%16.16lx.%16.16lx",
170111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
170211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
170311ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KMCTR  subfunc 0x%16.16lx.%16.16lx",
170411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
170511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
170611ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KMF    subfunc 0x%16.16lx.%16.16lx",
170711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
170811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
170911ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KMO    subfunc 0x%16.16lx.%16.16lx",
171011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
171111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
171211ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  PCC    subfunc 0x%16.16lx.%16.16lx",
171311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
171411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
171511ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  PPNO   subfunc 0x%16.16lx.%16.16lx",
171611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
171711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
171811ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KMA    subfunc 0x%16.16lx.%16.16lx",
171911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
172011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
172113209ad0SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KDSA   subfunc 0x%16.16lx.%16.16lx",
172213209ad0SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
172313209ad0SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
1724173aec2dSChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1725173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
1726173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
1727173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
1728173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
17294f45b90eSChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
17304f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
17314f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
17324f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
17334f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
173411ba5961SChristian Borntraeger 
17350a763c78SDavid Hildenbrand 	return 0;
17360a763c78SDavid Hildenbrand }
1737346fa2f8SChristian Borntraeger 
1738658b6edaSMichael Mueller static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1739658b6edaSMichael Mueller {
1740658b6edaSMichael Mueller 	int ret = -ENXIO;
1741658b6edaSMichael Mueller 
1742658b6edaSMichael Mueller 	switch (attr->attr) {
1743658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
1744658b6edaSMichael Mueller 		ret = kvm_s390_get_processor(kvm, attr);
1745658b6edaSMichael Mueller 		break;
1746658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MACHINE:
1747658b6edaSMichael Mueller 		ret = kvm_s390_get_machine(kvm, attr);
1748658b6edaSMichael Mueller 		break;
174915c9705fSDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
175015c9705fSDavid Hildenbrand 		ret = kvm_s390_get_processor_feat(kvm, attr);
175115c9705fSDavid Hildenbrand 		break;
175215c9705fSDavid Hildenbrand 	case KVM_S390_VM_CPU_MACHINE_FEAT:
175315c9705fSDavid Hildenbrand 		ret = kvm_s390_get_machine_feat(kvm, attr);
175415c9705fSDavid Hildenbrand 		break;
17550a763c78SDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
17560a763c78SDavid Hildenbrand 		ret = kvm_s390_get_processor_subfunc(kvm, attr);
17570a763c78SDavid Hildenbrand 		break;
17580a763c78SDavid Hildenbrand 	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
17590a763c78SDavid Hildenbrand 		ret = kvm_s390_get_machine_subfunc(kvm, attr);
17600a763c78SDavid Hildenbrand 		break;
1761658b6edaSMichael Mueller 	}
1762658b6edaSMichael Mueller 	return ret;
1763658b6edaSMichael Mueller }
1764658b6edaSMichael Mueller 
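/*
 * Usage sketch (illustrative, not part of this file): the host machine
 * model reported by kvm_s390_get_machine() is fetched from userspace with
 * KVM_GET_DEVICE_ATTR and the KVM_S390_VM_CPU_MODEL group.  The helper
 * below is hypothetical; "vm_fd" is assumed to be a VM fd and error
 * handling is elided.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	static int get_machine_model(int vm_fd,
 *				     struct kvm_s390_vm_cpu_machine *mach)
 *	{
 *		struct kvm_device_attr attr = {
 *			.group = KVM_S390_VM_CPU_MODEL,
 *			.attr  = KVM_S390_VM_CPU_MACHINE,
 *			.addr  = (__u64)(unsigned long)mach,
 *		};
 *
 *		return ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
 *	}
 */
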
1765f2061656SDominik Dingel static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1766f2061656SDominik Dingel {
1767f2061656SDominik Dingel 	int ret;
1768f2061656SDominik Dingel 
1769f2061656SDominik Dingel 	switch (attr->group) {
17704f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
17718c0a7ce6SDominik Dingel 		ret = kvm_s390_set_mem_control(kvm, attr);
17724f718eabSDominik Dingel 		break;
177372f25020SJason J. Herne 	case KVM_S390_VM_TOD:
177472f25020SJason J. Herne 		ret = kvm_s390_set_tod(kvm, attr);
177572f25020SJason J. Herne 		break;
1776658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
1777658b6edaSMichael Mueller 		ret = kvm_s390_set_cpu_model(kvm, attr);
1778658b6edaSMichael Mueller 		break;
1779a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO:
1780a374e892STony Krowiak 		ret = kvm_s390_vm_set_crypto(kvm, attr);
1781a374e892STony Krowiak 		break;
1782190df4a2SClaudio Imbrenda 	case KVM_S390_VM_MIGRATION:
1783190df4a2SClaudio Imbrenda 		ret = kvm_s390_vm_set_migration(kvm, attr);
1784190df4a2SClaudio Imbrenda 		break;
1785f2061656SDominik Dingel 	default:
1786f2061656SDominik Dingel 		ret = -ENXIO;
1787f2061656SDominik Dingel 		break;
1788f2061656SDominik Dingel 	}
1789f2061656SDominik Dingel 
1790f2061656SDominik Dingel 	return ret;
1791f2061656SDominik Dingel }
1792f2061656SDominik Dingel 
1793f2061656SDominik Dingel static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1794f2061656SDominik Dingel {
17958c0a7ce6SDominik Dingel 	int ret;
17968c0a7ce6SDominik Dingel 
17978c0a7ce6SDominik Dingel 	switch (attr->group) {
17988c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
17998c0a7ce6SDominik Dingel 		ret = kvm_s390_get_mem_control(kvm, attr);
18008c0a7ce6SDominik Dingel 		break;
180172f25020SJason J. Herne 	case KVM_S390_VM_TOD:
180272f25020SJason J. Herne 		ret = kvm_s390_get_tod(kvm, attr);
180372f25020SJason J. Herne 		break;
1804658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
1805658b6edaSMichael Mueller 		ret = kvm_s390_get_cpu_model(kvm, attr);
1806658b6edaSMichael Mueller 		break;
1807190df4a2SClaudio Imbrenda 	case KVM_S390_VM_MIGRATION:
1808190df4a2SClaudio Imbrenda 		ret = kvm_s390_vm_get_migration(kvm, attr);
1809190df4a2SClaudio Imbrenda 		break;
18108c0a7ce6SDominik Dingel 	default:
18118c0a7ce6SDominik Dingel 		ret = -ENXIO;
18128c0a7ce6SDominik Dingel 		break;
18138c0a7ce6SDominik Dingel 	}
18148c0a7ce6SDominik Dingel 
18158c0a7ce6SDominik Dingel 	return ret;
1816f2061656SDominik Dingel }
1817f2061656SDominik Dingel 
1818f2061656SDominik Dingel static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1819f2061656SDominik Dingel {
1820f2061656SDominik Dingel 	int ret;
1821f2061656SDominik Dingel 
1822f2061656SDominik Dingel 	switch (attr->group) {
18234f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
18244f718eabSDominik Dingel 		switch (attr->attr) {
18254f718eabSDominik Dingel 		case KVM_S390_VM_MEM_ENABLE_CMMA:
18264f718eabSDominik Dingel 		case KVM_S390_VM_MEM_CLR_CMMA:
1827f9cbd9b0SDavid Hildenbrand 			ret = sclp.has_cmma ? 0 : -ENXIO;
1828f9cbd9b0SDavid Hildenbrand 			break;
18298c0a7ce6SDominik Dingel 		case KVM_S390_VM_MEM_LIMIT_SIZE:
18304f718eabSDominik Dingel 			ret = 0;
18314f718eabSDominik Dingel 			break;
18324f718eabSDominik Dingel 		default:
18334f718eabSDominik Dingel 			ret = -ENXIO;
18344f718eabSDominik Dingel 			break;
18354f718eabSDominik Dingel 		}
18364f718eabSDominik Dingel 		break;
183772f25020SJason J. Herne 	case KVM_S390_VM_TOD:
183872f25020SJason J. Herne 		switch (attr->attr) {
183972f25020SJason J. Herne 		case KVM_S390_VM_TOD_LOW:
184072f25020SJason J. Herne 		case KVM_S390_VM_TOD_HIGH:
184172f25020SJason J. Herne 			ret = 0;
184272f25020SJason J. Herne 			break;
184372f25020SJason J. Herne 		default:
184472f25020SJason J. Herne 			ret = -ENXIO;
184572f25020SJason J. Herne 			break;
184672f25020SJason J. Herne 		}
184772f25020SJason J. Herne 		break;
1848658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
1849658b6edaSMichael Mueller 		switch (attr->attr) {
1850658b6edaSMichael Mueller 		case KVM_S390_VM_CPU_PROCESSOR:
1851658b6edaSMichael Mueller 		case KVM_S390_VM_CPU_MACHINE:
185215c9705fSDavid Hildenbrand 		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
185315c9705fSDavid Hildenbrand 		case KVM_S390_VM_CPU_MACHINE_FEAT:
18540a763c78SDavid Hildenbrand 		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1855346fa2f8SChristian Borntraeger 		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1856658b6edaSMichael Mueller 			ret = 0;
1857658b6edaSMichael Mueller 			break;
1858658b6edaSMichael Mueller 		default:
1859658b6edaSMichael Mueller 			ret = -ENXIO;
1860658b6edaSMichael Mueller 			break;
1861658b6edaSMichael Mueller 		}
1862658b6edaSMichael Mueller 		break;
1863a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO:
1864a374e892STony Krowiak 		switch (attr->attr) {
1865a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1866a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1867a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1868a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1869a374e892STony Krowiak 			ret = 0;
1870a374e892STony Krowiak 			break;
187137940fb0STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
187237940fb0STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
187337940fb0STony Krowiak 			ret = ap_instructions_available() ? 0 : -ENXIO;
187437940fb0STony Krowiak 			break;
1875a374e892STony Krowiak 		default:
1876a374e892STony Krowiak 			ret = -ENXIO;
1877a374e892STony Krowiak 			break;
1878a374e892STony Krowiak 		}
1879a374e892STony Krowiak 		break;
1880190df4a2SClaudio Imbrenda 	case KVM_S390_VM_MIGRATION:
1881190df4a2SClaudio Imbrenda 		ret = 0;
1882190df4a2SClaudio Imbrenda 		break;
1883f2061656SDominik Dingel 	default:
1884f2061656SDominik Dingel 		ret = -ENXIO;
1885f2061656SDominik Dingel 		break;
1886f2061656SDominik Dingel 	}
1887f2061656SDominik Dingel 
1888f2061656SDominik Dingel 	return ret;
1889f2061656SDominik Dingel }
1890f2061656SDominik Dingel 
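/*
 * Usage sketch (illustrative, not part of this file): userspace can probe
 * any of the groups/attributes dispatched by kvm_s390_vm_has_attr() with
 * KVM_HAS_DEVICE_ATTR before using them; the ioctl returns 0 if the
 * attribute is supported and -1 with errno ENXIO otherwise.  The helper
 * below is hypothetical; "vm_fd" is assumed to be a VM fd.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	static int vm_attr_supported(int vm_fd, __u32 group, __u64 attr_nr)
 *	{
 *		struct kvm_device_attr attr = {
 *			.group = group,
 *			.attr  = attr_nr,
 *		};
 *
 *		return ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0;
 *	}
 */
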
189130ee2a98SJason J. Herne static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
189230ee2a98SJason J. Herne {
189330ee2a98SJason J. Herne 	uint8_t *keys;
189430ee2a98SJason J. Herne 	uint64_t hva;
18954f899147SChristian Borntraeger 	int srcu_idx, i, r = 0;
189630ee2a98SJason J. Herne 
189730ee2a98SJason J. Herne 	if (args->flags != 0)
189830ee2a98SJason J. Herne 		return -EINVAL;
189930ee2a98SJason J. Herne 
190030ee2a98SJason J. Herne 	/* Is this guest using storage keys? */
190155531b74SJanosch Frank 	if (!mm_uses_skeys(current->mm))
190230ee2a98SJason J. Herne 		return KVM_S390_GET_SKEYS_NONE;
190330ee2a98SJason J. Herne 
190430ee2a98SJason J. Herne 	/* Enforce sane limit on memory allocation */
190530ee2a98SJason J. Herne 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
190630ee2a98SJason J. Herne 		return -EINVAL;
190730ee2a98SJason J. Herne 
1908c4196218SChristian Borntraeger 	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
190930ee2a98SJason J. Herne 	if (!keys)
191030ee2a98SJason J. Herne 		return -ENOMEM;
191130ee2a98SJason J. Herne 
1912d8ed45c5SMichel Lespinasse 	mmap_read_lock(current->mm);
19134f899147SChristian Borntraeger 	srcu_idx = srcu_read_lock(&kvm->srcu);
191430ee2a98SJason J. Herne 	for (i = 0; i < args->count; i++) {
191530ee2a98SJason J. Herne 		hva = gfn_to_hva(kvm, args->start_gfn + i);
191630ee2a98SJason J. Herne 		if (kvm_is_error_hva(hva)) {
191730ee2a98SJason J. Herne 			r = -EFAULT;
1918d3ed1ceeSMartin Schwidefsky 			break;
191930ee2a98SJason J. Herne 		}
192030ee2a98SJason J. Herne 
1921154c8c19SDavid Hildenbrand 		r = get_guest_storage_key(current->mm, hva, &keys[i]);
1922154c8c19SDavid Hildenbrand 		if (r)
1923d3ed1ceeSMartin Schwidefsky 			break;
192430ee2a98SJason J. Herne 	}
19254f899147SChristian Borntraeger 	srcu_read_unlock(&kvm->srcu, srcu_idx);
1926d8ed45c5SMichel Lespinasse 	mmap_read_unlock(current->mm);
192730ee2a98SJason J. Herne 
1928d3ed1ceeSMartin Schwidefsky 	if (!r) {
192930ee2a98SJason J. Herne 		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
193030ee2a98SJason J. Herne 				 sizeof(uint8_t) * args->count);
193130ee2a98SJason J. Herne 		if (r)
193230ee2a98SJason J. Herne 			r = -EFAULT;
1933d3ed1ceeSMartin Schwidefsky 	}
1934d3ed1ceeSMartin Schwidefsky 
193530ee2a98SJason J. Herne 	kvfree(keys);
193630ee2a98SJason J. Herne 	return r;
193730ee2a98SJason J. Herne }
193830ee2a98SJason J. Herne 
193930ee2a98SJason J. Herne static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
194030ee2a98SJason J. Herne {
194130ee2a98SJason J. Herne 	uint8_t *keys;
194230ee2a98SJason J. Herne 	uint64_t hva;
19434f899147SChristian Borntraeger 	int srcu_idx, i, r = 0;
1944bd096f64SJanosch Frank 	bool unlocked;
194530ee2a98SJason J. Herne 
194630ee2a98SJason J. Herne 	if (args->flags != 0)
194730ee2a98SJason J. Herne 		return -EINVAL;
194830ee2a98SJason J. Herne 
194930ee2a98SJason J. Herne 	/* Enforce sane limit on memory allocation */
195030ee2a98SJason J. Herne 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
195130ee2a98SJason J. Herne 		return -EINVAL;
195230ee2a98SJason J. Herne 
1953c4196218SChristian Borntraeger 	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
195430ee2a98SJason J. Herne 	if (!keys)
195530ee2a98SJason J. Herne 		return -ENOMEM;
195630ee2a98SJason J. Herne 
195730ee2a98SJason J. Herne 	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
195830ee2a98SJason J. Herne 			   sizeof(uint8_t) * args->count);
195930ee2a98SJason J. Herne 	if (r) {
196030ee2a98SJason J. Herne 		r = -EFAULT;
196130ee2a98SJason J. Herne 		goto out;
196230ee2a98SJason J. Herne 	}
196330ee2a98SJason J. Herne 
196430ee2a98SJason J. Herne 	/* Enable storage key handling for the guest */
196514d4a425SDominik Dingel 	r = s390_enable_skey();
196614d4a425SDominik Dingel 	if (r)
196714d4a425SDominik Dingel 		goto out;
196830ee2a98SJason J. Herne 
1969bd096f64SJanosch Frank 	i = 0;
1970d8ed45c5SMichel Lespinasse 	mmap_read_lock(current->mm);
19714f899147SChristian Borntraeger 	srcu_idx = srcu_read_lock(&kvm->srcu);
1972bd096f64SJanosch Frank 	while (i < args->count) {
1973bd096f64SJanosch Frank 		unlocked = false;
197430ee2a98SJason J. Herne 		hva = gfn_to_hva(kvm, args->start_gfn + i);
197530ee2a98SJason J. Herne 		if (kvm_is_error_hva(hva)) {
197630ee2a98SJason J. Herne 			r = -EFAULT;
1977d3ed1ceeSMartin Schwidefsky 			break;
197830ee2a98SJason J. Herne 		}
197930ee2a98SJason J. Herne 
198030ee2a98SJason J. Herne 		/* Lowest order bit is reserved */
198130ee2a98SJason J. Herne 		if (keys[i] & 0x01) {
198230ee2a98SJason J. Herne 			r = -EINVAL;
1983d3ed1ceeSMartin Schwidefsky 			break;
198430ee2a98SJason J. Herne 		}
198530ee2a98SJason J. Herne 
1986fe69eabfSDavid Hildenbrand 		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
1987bd096f64SJanosch Frank 		if (r) {
198864019a2eSPeter Xu 			r = fixup_user_fault(current->mm, hva,
1989bd096f64SJanosch Frank 					     FAULT_FLAG_WRITE, &unlocked);
199030ee2a98SJason J. Herne 			if (r)
1991d3ed1ceeSMartin Schwidefsky 				break;
199230ee2a98SJason J. Herne 		}
1993bd096f64SJanosch Frank 		if (!r)
1994bd096f64SJanosch Frank 			i++;
1995bd096f64SJanosch Frank 	}
19964f899147SChristian Borntraeger 	srcu_read_unlock(&kvm->srcu, srcu_idx);
1997d8ed45c5SMichel Lespinasse 	mmap_read_unlock(current->mm);
199830ee2a98SJason J. Herne out:
199930ee2a98SJason J. Herne 	kvfree(keys);
200030ee2a98SJason J. Herne 	return r;
200130ee2a98SJason J. Herne }
200230ee2a98SJason J. Herne 
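/*
 * Illustrative sketch only, not part of the kernel: how user space could
 * drive the two storage key ioctls above. PAGE_COUNT is a made-up constant,
 * the layout is the uapi struct kvm_s390_skeys, error handling is omitted:
 *
 *	uint8_t skeys[PAGE_COUNT];
 *	struct kvm_s390_skeys args = {
 *		.start_gfn = 0,
 *		.count = PAGE_COUNT,
 *		.skeydata_addr = (uint64_t)(uintptr_t)skeys,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_S390_GET_SKEYS, &args) == KVM_S390_GET_SKEYS_NONE)
 *		return;	// the guest has not used storage keys yet
 *	// ... inspect or modify skeys[] ...
 *	ioctl(vm_fd, KVM_S390_SET_SKEYS, &args);
 */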
20034036e387SClaudio Imbrenda /*
20044036e387SClaudio Imbrenda  * Base address and length must be sent at the start of each block, therefore
20054036e387SClaudio Imbrenda  * it's cheaper to send some clean data, as long as it's less than the size of
20064036e387SClaudio Imbrenda  * two longs.
20074036e387SClaudio Imbrenda  */
20084036e387SClaudio Imbrenda #define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
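/*
 * Worked example, assuming a 64-bit host: the distance above evaluates to
 * 16 pages. Each page contributes one byte of attributes, so reporting up
 * to 16 consecutive clean pages inline costs at most 16 bytes, no more than
 * the two longs of header a fresh block would require.
 */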
20094036e387SClaudio Imbrenda /* for consistency */
20104036e387SClaudio Imbrenda #define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
20114036e387SClaudio Imbrenda 
2012afdad616SClaudio Imbrenda static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2013afdad616SClaudio Imbrenda 			      u8 *res, unsigned long bufsize)
2014afdad616SClaudio Imbrenda {
2015afdad616SClaudio Imbrenda 	unsigned long pgstev, hva, cur_gfn = args->start_gfn;
2016afdad616SClaudio Imbrenda 
2017afdad616SClaudio Imbrenda 	args->count = 0;
2018afdad616SClaudio Imbrenda 	while (args->count < bufsize) {
2019afdad616SClaudio Imbrenda 		hva = gfn_to_hva(kvm, cur_gfn);
2020afdad616SClaudio Imbrenda 		/*
2021afdad616SClaudio Imbrenda 		 * We return an error if the first value was invalid, but we
2022afdad616SClaudio Imbrenda 		 * return successfully if at least one value was copied.
2023afdad616SClaudio Imbrenda 		 */
2024afdad616SClaudio Imbrenda 		if (kvm_is_error_hva(hva))
2025afdad616SClaudio Imbrenda 			return args->count ? 0 : -EFAULT;
2026afdad616SClaudio Imbrenda 		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2027afdad616SClaudio Imbrenda 			pgstev = 0;
2028afdad616SClaudio Imbrenda 		res[args->count++] = (pgstev >> 24) & 0x43;
2029afdad616SClaudio Imbrenda 		cur_gfn++;
2030afdad616SClaudio Imbrenda 	}
2031afdad616SClaudio Imbrenda 
2032afdad616SClaudio Imbrenda 	return 0;
2033afdad616SClaudio Imbrenda }
2034afdad616SClaudio Imbrenda 
2035c928bfc2SMaciej S. Szmigiero static struct kvm_memory_slot *gfn_to_memslot_approx(struct kvm_memslots *slots,
2036c928bfc2SMaciej S. Szmigiero 						     gfn_t gfn)
2037c928bfc2SMaciej S. Szmigiero {
2038c928bfc2SMaciej S. Szmigiero 	return ____gfn_to_memslot(slots, gfn, true);
2039c928bfc2SMaciej S. Szmigiero }
2040c928bfc2SMaciej S. Szmigiero 
2041afdad616SClaudio Imbrenda static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
2042afdad616SClaudio Imbrenda 					      unsigned long cur_gfn)
2043afdad616SClaudio Imbrenda {
2044c928bfc2SMaciej S. Szmigiero 	struct kvm_memory_slot *ms = gfn_to_memslot_approx(slots, cur_gfn);
2045afdad616SClaudio Imbrenda 	unsigned long ofs = cur_gfn - ms->base_gfn;
2046a54d8066SMaciej S. Szmigiero 	struct rb_node *mnode = &ms->gfn_node[slots->node_idx];
2047afdad616SClaudio Imbrenda 
2048afdad616SClaudio Imbrenda 	if (ms->base_gfn + ms->npages <= cur_gfn) {
2049a54d8066SMaciej S. Szmigiero 		mnode = rb_next(mnode);
2050afdad616SClaudio Imbrenda 		/* If we are above the highest slot, wrap around */
2051a54d8066SMaciej S. Szmigiero 		if (!mnode)
2052a54d8066SMaciej S. Szmigiero 			mnode = rb_first(&slots->gfn_tree);
2053afdad616SClaudio Imbrenda 
2054a54d8066SMaciej S. Szmigiero 		ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
2055afdad616SClaudio Imbrenda 		ofs = 0;
2056afdad616SClaudio Imbrenda 	}
2057afdad616SClaudio Imbrenda 	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
2058a54d8066SMaciej S. Szmigiero 	while (ofs >= ms->npages && (mnode = rb_next(mnode))) {
2059a54d8066SMaciej S. Szmigiero 		ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
2060b5c7e7ecSYury Norov 		ofs = find_first_bit(kvm_second_dirty_bitmap(ms), ms->npages);
2061afdad616SClaudio Imbrenda 	}
2062afdad616SClaudio Imbrenda 	return ms->base_gfn + ofs;
2063afdad616SClaudio Imbrenda }
2064afdad616SClaudio Imbrenda 
2065afdad616SClaudio Imbrenda static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2066afdad616SClaudio Imbrenda 			     u8 *res, unsigned long bufsize)
2067afdad616SClaudio Imbrenda {
2068afdad616SClaudio Imbrenda 	unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
2069afdad616SClaudio Imbrenda 	struct kvm_memslots *slots = kvm_memslots(kvm);
2070afdad616SClaudio Imbrenda 	struct kvm_memory_slot *ms;
2071afdad616SClaudio Imbrenda 
2072a54d8066SMaciej S. Szmigiero 	if (unlikely(kvm_memslots_empty(slots)))
20730774a964SSean Christopherson 		return 0;
20740774a964SSean Christopherson 
2075afdad616SClaudio Imbrenda 	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
2076afdad616SClaudio Imbrenda 	ms = gfn_to_memslot(kvm, cur_gfn);
2077afdad616SClaudio Imbrenda 	args->count = 0;
2078afdad616SClaudio Imbrenda 	args->start_gfn = cur_gfn;
2079afdad616SClaudio Imbrenda 	if (!ms)
2080afdad616SClaudio Imbrenda 		return 0;
2081afdad616SClaudio Imbrenda 	next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
20826a656832SMaciej S. Szmigiero 	mem_end = kvm_s390_get_gfn_end(slots);
2083afdad616SClaudio Imbrenda 
2084afdad616SClaudio Imbrenda 	while (args->count < bufsize) {
2085afdad616SClaudio Imbrenda 		hva = gfn_to_hva(kvm, cur_gfn);
2086afdad616SClaudio Imbrenda 		if (kvm_is_error_hva(hva))
2087afdad616SClaudio Imbrenda 			return 0;
2088afdad616SClaudio Imbrenda 		/* Decrement only if we actually flipped the bit to 0 */
2089afdad616SClaudio Imbrenda 		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
2090afdad616SClaudio Imbrenda 			atomic64_dec(&kvm->arch.cmma_dirty_pages);
2091afdad616SClaudio Imbrenda 		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2092afdad616SClaudio Imbrenda 			pgstev = 0;
2093afdad616SClaudio Imbrenda 		/* Save the value */
2094afdad616SClaudio Imbrenda 		res[args->count++] = (pgstev >> 24) & 0x43;
2095afdad616SClaudio Imbrenda 		/* If the next bit is too far away, stop. */
2096afdad616SClaudio Imbrenda 		if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
2097afdad616SClaudio Imbrenda 			return 0;
2098afdad616SClaudio Imbrenda 		/* If we reached the previous "next", find the next one */
2099afdad616SClaudio Imbrenda 		if (cur_gfn == next_gfn)
2100afdad616SClaudio Imbrenda 			next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2101afdad616SClaudio Imbrenda 		/* Reached the end of memory or of the buffer, stop */
2102afdad616SClaudio Imbrenda 		if ((next_gfn >= mem_end) ||
2103afdad616SClaudio Imbrenda 		    (next_gfn - args->start_gfn >= bufsize))
2104afdad616SClaudio Imbrenda 			return 0;
2105afdad616SClaudio Imbrenda 		cur_gfn++;
2106afdad616SClaudio Imbrenda 		/* Reached the end of the current memslot, take the next one. */
2107afdad616SClaudio Imbrenda 		if (cur_gfn - ms->base_gfn >= ms->npages) {
2108afdad616SClaudio Imbrenda 			ms = gfn_to_memslot(kvm, cur_gfn);
2109afdad616SClaudio Imbrenda 			if (!ms)
2110afdad616SClaudio Imbrenda 				return 0;
2111afdad616SClaudio Imbrenda 		}
2112afdad616SClaudio Imbrenda 	}
2113afdad616SClaudio Imbrenda 	return 0;
2114afdad616SClaudio Imbrenda }
2115afdad616SClaudio Imbrenda 
2116afdad616SClaudio Imbrenda /*
21174036e387SClaudio Imbrenda  * This function searches for the next page with dirty CMMA attributes, and
21184036e387SClaudio Imbrenda  * saves the attributes in the buffer up to either the end of the buffer or
21194036e387SClaudio Imbrenda  * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
21204036e387SClaudio Imbrenda  * no trailing clean bytes are saved.
21214036e387SClaudio Imbrenda  * In case no dirty bits were found, or if CMMA was not enabled or used, the
21224036e387SClaudio Imbrenda  * output buffer will indicate 0 as length.
21234036e387SClaudio Imbrenda  */
21244036e387SClaudio Imbrenda static int kvm_s390_get_cmma_bits(struct kvm *kvm,
21254036e387SClaudio Imbrenda 				  struct kvm_s390_cmma_log *args)
21264036e387SClaudio Imbrenda {
2127afdad616SClaudio Imbrenda 	unsigned long bufsize;
2128afdad616SClaudio Imbrenda 	int srcu_idx, peek, ret;
2129afdad616SClaudio Imbrenda 	u8 *values;
21304036e387SClaudio Imbrenda 
2131afdad616SClaudio Imbrenda 	if (!kvm->arch.use_cmma)
21324036e387SClaudio Imbrenda 		return -ENXIO;
21334036e387SClaudio Imbrenda 	/* Invalid/unsupported flags were specified */
21344036e387SClaudio Imbrenda 	if (args->flags & ~KVM_S390_CMMA_PEEK)
21354036e387SClaudio Imbrenda 		return -EINVAL;
21364036e387SClaudio Imbrenda 	/* Migration mode query, and we are not doing a migration */
21374036e387SClaudio Imbrenda 	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
2138afdad616SClaudio Imbrenda 	if (!peek && !kvm->arch.migration_mode)
21394036e387SClaudio Imbrenda 		return -EINVAL;
21404036e387SClaudio Imbrenda 	/* CMMA is disabled or was not used, or the buffer has length zero */
21414036e387SClaudio Imbrenda 	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
2142c9f0a2b8SJanosch Frank 	if (!bufsize || !kvm->mm->context.uses_cmm) {
21434036e387SClaudio Imbrenda 		memset(args, 0, sizeof(*args));
21444036e387SClaudio Imbrenda 		return 0;
21454036e387SClaudio Imbrenda 	}
21464036e387SClaudio Imbrenda 	/* We are not peeking, and there are no dirty pages */
2147afdad616SClaudio Imbrenda 	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
21484036e387SClaudio Imbrenda 		memset(args, 0, sizeof(*args));
21494036e387SClaudio Imbrenda 		return 0;
21504036e387SClaudio Imbrenda 	}
21514036e387SClaudio Imbrenda 
2152afdad616SClaudio Imbrenda 	values = vmalloc(bufsize);
2153afdad616SClaudio Imbrenda 	if (!values)
21544036e387SClaudio Imbrenda 		return -ENOMEM;
21554036e387SClaudio Imbrenda 
2156d8ed45c5SMichel Lespinasse 	mmap_read_lock(kvm->mm);
21574036e387SClaudio Imbrenda 	srcu_idx = srcu_read_lock(&kvm->srcu);
2158afdad616SClaudio Imbrenda 	if (peek)
2159afdad616SClaudio Imbrenda 		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
2160afdad616SClaudio Imbrenda 	else
2161afdad616SClaudio Imbrenda 		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
21624036e387SClaudio Imbrenda 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2163d8ed45c5SMichel Lespinasse 	mmap_read_unlock(kvm->mm);
21644036e387SClaudio Imbrenda 
2165afdad616SClaudio Imbrenda 	if (kvm->arch.migration_mode)
2166afdad616SClaudio Imbrenda 		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2167afdad616SClaudio Imbrenda 	else
2168afdad616SClaudio Imbrenda 		args->remaining = 0;
21694036e387SClaudio Imbrenda 
2170afdad616SClaudio Imbrenda 	if (copy_to_user((void __user *)args->values, values, args->count))
2171afdad616SClaudio Imbrenda 		ret = -EFAULT;
2172afdad616SClaudio Imbrenda 
2173afdad616SClaudio Imbrenda 	vfree(values);
2174afdad616SClaudio Imbrenda 	return ret;
21754036e387SClaudio Imbrenda }
21764036e387SClaudio Imbrenda 
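/*
 * Illustrative sketch only, not part of the kernel: a user space migration
 * loop could drain the dirty CMMA attributes through the ioctl handled
 * above roughly as follows, assuming migration mode was enabled earlier via
 * the KVM_S390_VM_MIGRATION attribute group. BUF_SIZE is a made-up
 * constant, error handling is omitted:
 *
 *	uint8_t values[BUF_SIZE];
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = 0,
 *		.count = BUF_SIZE,
 *		.flags = 0,	// or KVM_S390_CMMA_PEEK to only peek
 *		.values = (uint64_t)(uintptr_t)values,
 *	};
 *
 *	do {
 *		if (ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log) < 0)
 *			break;
 *		// log.start_gfn/log.count now describe the block written to
 *		// values[]; log.remaining counts the still-dirty pages.
 *		log.start_gfn += log.count;
 *		log.count = BUF_SIZE;
 *	} while (log.remaining);
 */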
21774036e387SClaudio Imbrenda /*
21784036e387SClaudio Imbrenda  * This function sets the CMMA attributes for the given pages. If the input
21794036e387SClaudio Imbrenda  * buffer has zero length, no action is taken, otherwise the attributes are
2180c9f0a2b8SJanosch Frank  * set and the mm->context.uses_cmm flag is set.
21814036e387SClaudio Imbrenda  */
21824036e387SClaudio Imbrenda static int kvm_s390_set_cmma_bits(struct kvm *kvm,
21834036e387SClaudio Imbrenda 				  const struct kvm_s390_cmma_log *args)
21844036e387SClaudio Imbrenda {
21854036e387SClaudio Imbrenda 	unsigned long hva, mask, pgstev, i;
21864036e387SClaudio Imbrenda 	uint8_t *bits;
21874036e387SClaudio Imbrenda 	int srcu_idx, r = 0;
21884036e387SClaudio Imbrenda 
21894036e387SClaudio Imbrenda 	mask = args->mask;
21904036e387SClaudio Imbrenda 
21914036e387SClaudio Imbrenda 	if (!kvm->arch.use_cmma)
21924036e387SClaudio Imbrenda 		return -ENXIO;
21934036e387SClaudio Imbrenda 	/* invalid/unsupported flags */
21944036e387SClaudio Imbrenda 	if (args->flags != 0)
21954036e387SClaudio Imbrenda 		return -EINVAL;
21964036e387SClaudio Imbrenda 	/* Enforce sane limit on memory allocation */
21974036e387SClaudio Imbrenda 	if (args->count > KVM_S390_CMMA_SIZE_MAX)
21984036e387SClaudio Imbrenda 		return -EINVAL;
21994036e387SClaudio Imbrenda 	/* Nothing to do */
22004036e387SClaudio Imbrenda 	if (args->count == 0)
22014036e387SClaudio Imbrenda 		return 0;
22024036e387SClaudio Imbrenda 
220342bc47b3SKees Cook 	bits = vmalloc(array_size(sizeof(*bits), args->count));
22044036e387SClaudio Imbrenda 	if (!bits)
22054036e387SClaudio Imbrenda 		return -ENOMEM;
22064036e387SClaudio Imbrenda 
22074036e387SClaudio Imbrenda 	r = copy_from_user(bits, (void __user *)args->values, args->count);
22084036e387SClaudio Imbrenda 	if (r) {
22094036e387SClaudio Imbrenda 		r = -EFAULT;
22104036e387SClaudio Imbrenda 		goto out;
22114036e387SClaudio Imbrenda 	}
22124036e387SClaudio Imbrenda 
2213d8ed45c5SMichel Lespinasse 	mmap_read_lock(kvm->mm);
22144036e387SClaudio Imbrenda 	srcu_idx = srcu_read_lock(&kvm->srcu);
22154036e387SClaudio Imbrenda 	for (i = 0; i < args->count; i++) {
22164036e387SClaudio Imbrenda 		hva = gfn_to_hva(kvm, args->start_gfn + i);
22174036e387SClaudio Imbrenda 		if (kvm_is_error_hva(hva)) {
22184036e387SClaudio Imbrenda 			r = -EFAULT;
22194036e387SClaudio Imbrenda 			break;
22204036e387SClaudio Imbrenda 		}
22214036e387SClaudio Imbrenda 
22224036e387SClaudio Imbrenda 		pgstev = bits[i];
22234036e387SClaudio Imbrenda 		pgstev = pgstev << 24;
22241bab1c02SClaudio Imbrenda 		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
22254036e387SClaudio Imbrenda 		set_pgste_bits(kvm->mm, hva, mask, pgstev);
22264036e387SClaudio Imbrenda 	}
22274036e387SClaudio Imbrenda 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2228d8ed45c5SMichel Lespinasse 	mmap_read_unlock(kvm->mm);
22294036e387SClaudio Imbrenda 
2230c9f0a2b8SJanosch Frank 	if (!kvm->mm->context.uses_cmm) {
2231d8ed45c5SMichel Lespinasse 		mmap_write_lock(kvm->mm);
2232c9f0a2b8SJanosch Frank 		kvm->mm->context.uses_cmm = 1;
2233d8ed45c5SMichel Lespinasse 		mmap_write_unlock(kvm->mm);
22344036e387SClaudio Imbrenda 	}
22354036e387SClaudio Imbrenda out:
22364036e387SClaudio Imbrenda 	vfree(bits);
22374036e387SClaudio Imbrenda 	return r;
22384036e387SClaudio Imbrenda }
22394036e387SClaudio Imbrenda 
2240*be48d86fSClaudio Imbrenda /**
2241*be48d86fSClaudio Imbrenda  * kvm_s390_cpus_from_pv - Convert all protected vCPUs in a protected VM to
2242*be48d86fSClaudio Imbrenda  * non-protected.
2243*be48d86fSClaudio Imbrenda  * @kvm: the VM whose protected vCPUs are to be converted
2244*be48d86fSClaudio Imbrenda  * @rc: return value for the RC field of the UVC (in case of error)
2245*be48d86fSClaudio Imbrenda  * @rrc: return value for the RRC field of the UVC (in case of error)
2246*be48d86fSClaudio Imbrenda  *
2247*be48d86fSClaudio Imbrenda  * Does not stop in case of error, tries to convert as many
2248*be48d86fSClaudio Imbrenda  * CPUs as possible. In case of error, the RC and RRC of the last error are
2249*be48d86fSClaudio Imbrenda  * CPUs as possible. In case of error, the RC and RRC of the first error are
2250*be48d86fSClaudio Imbrenda  *
2251*be48d86fSClaudio Imbrenda  * Return: 0 in case of success, otherwise -EIO
2252*be48d86fSClaudio Imbrenda  */
2253*be48d86fSClaudio Imbrenda int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
225429b40f10SJanosch Frank {
225529b40f10SJanosch Frank 	struct kvm_vcpu *vcpu;
225646808a4cSMarc Zyngier 	unsigned long i;
2257*be48d86fSClaudio Imbrenda 	u16 _rc, _rrc;
2258*be48d86fSClaudio Imbrenda 	int ret = 0;
225929b40f10SJanosch Frank 
226029b40f10SJanosch Frank 	/*
226129b40f10SJanosch Frank 	 * We ignore failures and try to destroy as many CPUs as possible.
226229b40f10SJanosch Frank 	 * At the same time we must not free the assigned resources when
226329b40f10SJanosch Frank 	 * this fails, as the ultravisor still has access to that memory.
226429b40f10SJanosch Frank 	 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
226529b40f10SJanosch Frank 	 * behind.
226629b40f10SJanosch Frank 	 * We want to return the first failure rc and rrc, though.
226729b40f10SJanosch Frank 	 */
226829b40f10SJanosch Frank 	kvm_for_each_vcpu(i, vcpu, kvm) {
226929b40f10SJanosch Frank 		mutex_lock(&vcpu->mutex);
2270*be48d86fSClaudio Imbrenda 		if (kvm_s390_pv_destroy_cpu(vcpu, &_rc, &_rrc) && !ret) {
2271*be48d86fSClaudio Imbrenda 			*rc = _rc;
2272*be48d86fSClaudio Imbrenda 			*rrc = _rrc;
227329b40f10SJanosch Frank 			ret = -EIO;
227429b40f10SJanosch Frank 		}
227529b40f10SJanosch Frank 		mutex_unlock(&vcpu->mutex);
227629b40f10SJanosch Frank 	}
2277ee6a569dSMichael Mueller 	/* Ensure that we re-enable gisa if the non-PV guest used it but the PV guest did not. */
2278ee6a569dSMichael Mueller 	if (use_gisa)
2279ee6a569dSMichael Mueller 		kvm_s390_gisa_enable(kvm);
228029b40f10SJanosch Frank 	return ret;
228129b40f10SJanosch Frank }
228229b40f10SJanosch Frank 
2283*be48d86fSClaudio Imbrenda /**
2284*be48d86fSClaudio Imbrenda  * kvm_s390_cpus_to_pv - Convert all non-protected vCPUs in a protected VM
2285*be48d86fSClaudio Imbrenda  * to protected.
2286*be48d86fSClaudio Imbrenda  * @kvm: the VM whose protected vCPUs are to be converted
2287*be48d86fSClaudio Imbrenda  * @rc: return value for the RC field of the UVC (in case of error)
2288*be48d86fSClaudio Imbrenda  * @rrc: return value for the RRC field of the UVC (in case of error)
2289*be48d86fSClaudio Imbrenda  *
2290*be48d86fSClaudio Imbrenda  * Tries to undo the conversion in case of error.
2291*be48d86fSClaudio Imbrenda  *
2292*be48d86fSClaudio Imbrenda  * Return: 0 in case of success, otherwise -EIO
2293*be48d86fSClaudio Imbrenda  */
229429b40f10SJanosch Frank static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
229529b40f10SJanosch Frank {
229646808a4cSMarc Zyngier 	unsigned long i;
229746808a4cSMarc Zyngier 	int r = 0;
229829b40f10SJanosch Frank 	u16 dummy;
229929b40f10SJanosch Frank 
230029b40f10SJanosch Frank 	struct kvm_vcpu *vcpu;
230129b40f10SJanosch Frank 
2302ee6a569dSMichael Mueller 	/* Disable the GISA if the ultravisor does not support AIV. */
2303ee6a569dSMichael Mueller 	if (!test_bit_inv(BIT_UV_FEAT_AIV, &uv_info.uv_feature_indications))
2304ee6a569dSMichael Mueller 		kvm_s390_gisa_disable(kvm);
2305ee6a569dSMichael Mueller 
230629b40f10SJanosch Frank 	kvm_for_each_vcpu(i, vcpu, kvm) {
230729b40f10SJanosch Frank 		mutex_lock(&vcpu->mutex);
230829b40f10SJanosch Frank 		r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
230929b40f10SJanosch Frank 		mutex_unlock(&vcpu->mutex);
231029b40f10SJanosch Frank 		if (r)
231129b40f10SJanosch Frank 			break;
231229b40f10SJanosch Frank 	}
231329b40f10SJanosch Frank 	if (r)
231429b40f10SJanosch Frank 		kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
231529b40f10SJanosch Frank 	return r;
231629b40f10SJanosch Frank }
231729b40f10SJanosch Frank 
231835d02493SJanosch Frank /*
231935d02493SJanosch Frank  * Here we provide user space with a direct interface to query
232035d02493SJanosch Frank  * UV-related data like UV maxima and available features, as well
232135d02493SJanosch Frank  * as feature-specific data.
232235d02493SJanosch Frank  *
232335d02493SJanosch Frank  * To facilitate future extension of the data structures we'll try to
232435d02493SJanosch Frank  * write data up to the maximum requested length.
232535d02493SJanosch Frank  */
232635d02493SJanosch Frank static ssize_t kvm_s390_handle_pv_info(struct kvm_s390_pv_info *info)
232735d02493SJanosch Frank {
232835d02493SJanosch Frank 	ssize_t len_min;
232935d02493SJanosch Frank 
233035d02493SJanosch Frank 	switch (info->header.id) {
233135d02493SJanosch Frank 	case KVM_PV_INFO_VM: {
233235d02493SJanosch Frank 		len_min =  sizeof(info->header) + sizeof(info->vm);
233335d02493SJanosch Frank 
233435d02493SJanosch Frank 		if (info->header.len_max < len_min)
233535d02493SJanosch Frank 			return -EINVAL;
233635d02493SJanosch Frank 
233735d02493SJanosch Frank 		memcpy(info->vm.inst_calls_list,
233835d02493SJanosch Frank 		       uv_info.inst_calls_list,
233935d02493SJanosch Frank 		       sizeof(uv_info.inst_calls_list));
234035d02493SJanosch Frank 
234135d02493SJanosch Frank 		/* It's max cpuid, not max cpus, so it's off by one */
234235d02493SJanosch Frank 		info->vm.max_cpus = uv_info.max_guest_cpu_id + 1;
234335d02493SJanosch Frank 		info->vm.max_guests = uv_info.max_num_sec_conf;
234435d02493SJanosch Frank 		info->vm.max_guest_addr = uv_info.max_sec_stor_addr;
234535d02493SJanosch Frank 		info->vm.feature_indication = uv_info.uv_feature_indications;
234635d02493SJanosch Frank 
234735d02493SJanosch Frank 		return len_min;
234835d02493SJanosch Frank 	}
2349fe9a93e0SJanosch Frank 	case KVM_PV_INFO_DUMP: {
2350fe9a93e0SJanosch Frank 		len_min =  sizeof(info->header) + sizeof(info->dump);
2351fe9a93e0SJanosch Frank 
2352fe9a93e0SJanosch Frank 		if (info->header.len_max < len_min)
2353fe9a93e0SJanosch Frank 			return -EINVAL;
2354fe9a93e0SJanosch Frank 
2355fe9a93e0SJanosch Frank 		info->dump.dump_cpu_buffer_len = uv_info.guest_cpu_stor_len;
2356fe9a93e0SJanosch Frank 		info->dump.dump_config_mem_buffer_per_1m = uv_info.conf_dump_storage_state_len;
2357fe9a93e0SJanosch Frank 		info->dump.dump_config_finalize_len = uv_info.conf_dump_finalize_len;
2358fe9a93e0SJanosch Frank 		return len_min;
2359fe9a93e0SJanosch Frank 	}
236035d02493SJanosch Frank 	default:
236135d02493SJanosch Frank 		return -EINVAL;
236235d02493SJanosch Frank 	}
236335d02493SJanosch Frank }
236435d02493SJanosch Frank 
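/*
 * Illustrative sketch only, not part of the kernel: user space could query
 * the VM part of the UV info through KVM_S390_PV_COMMAND roughly like this
 * (uapi names assumed from kvm.h, error handling omitted):
 *
 *	struct kvm_s390_pv_info info = {
 *		.header.id = KVM_PV_INFO_VM,
 *		.header.len_max = sizeof(info.header) + sizeof(info.vm),
 *	};
 *	struct kvm_pv_cmd cmd = {
 *		.cmd = KVM_PV_INFO,
 *		.data = (uint64_t)(uintptr_t)&info,
 *	};
 *
 *	if (!ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd))
 *		printf("max guests: %llu\n",
 *		       (unsigned long long)info.vm.max_guests);
 *
 * info.header.len_written then tells how much of the structure was filled.
 */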
23650460eb35SJanosch Frank static int kvm_s390_pv_dmp(struct kvm *kvm, struct kvm_pv_cmd *cmd,
23660460eb35SJanosch Frank 			   struct kvm_s390_pv_dmp dmp)
23670460eb35SJanosch Frank {
23680460eb35SJanosch Frank 	int r = -EINVAL;
23690460eb35SJanosch Frank 	void __user *result_buff = (void __user *)dmp.buff_addr;
23700460eb35SJanosch Frank 
23710460eb35SJanosch Frank 	switch (dmp.subcmd) {
23720460eb35SJanosch Frank 	case KVM_PV_DUMP_INIT: {
23730460eb35SJanosch Frank 		if (kvm->arch.pv.dumping)
23740460eb35SJanosch Frank 			break;
23750460eb35SJanosch Frank 
23760460eb35SJanosch Frank 		/*
23770460eb35SJanosch Frank 		 * Block SIE entry as concurrent dump UVCs could lead
23780460eb35SJanosch Frank 		 * to validities.
23790460eb35SJanosch Frank 		 */
23800460eb35SJanosch Frank 		kvm_s390_vcpu_block_all(kvm);
23810460eb35SJanosch Frank 
23820460eb35SJanosch Frank 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
23830460eb35SJanosch Frank 				  UVC_CMD_DUMP_INIT, &cmd->rc, &cmd->rrc);
23840460eb35SJanosch Frank 		KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP INIT: rc %x rrc %x",
23850460eb35SJanosch Frank 			     cmd->rc, cmd->rrc);
23860460eb35SJanosch Frank 		if (!r) {
23870460eb35SJanosch Frank 			kvm->arch.pv.dumping = true;
23880460eb35SJanosch Frank 		} else {
23890460eb35SJanosch Frank 			kvm_s390_vcpu_unblock_all(kvm);
23900460eb35SJanosch Frank 			r = -EINVAL;
23910460eb35SJanosch Frank 		}
23920460eb35SJanosch Frank 		break;
23930460eb35SJanosch Frank 	}
23940460eb35SJanosch Frank 	case KVM_PV_DUMP_CONFIG_STOR_STATE: {
23950460eb35SJanosch Frank 		if (!kvm->arch.pv.dumping)
23960460eb35SJanosch Frank 			break;
23970460eb35SJanosch Frank 
23980460eb35SJanosch Frank 		/*
23990460eb35SJanosch Frank 		 * gaddr is an output parameter since we might stop
24000460eb35SJanosch Frank 		 * early. As dmp will be copied back in our caller, we
24010460eb35SJanosch Frank 		 * don't need to do it ourselves.
24020460eb35SJanosch Frank 		 */
24030460eb35SJanosch Frank 		r = kvm_s390_pv_dump_stor_state(kvm, result_buff, &dmp.gaddr, dmp.buff_len,
24040460eb35SJanosch Frank 						&cmd->rc, &cmd->rrc);
24050460eb35SJanosch Frank 		break;
24060460eb35SJanosch Frank 	}
24070460eb35SJanosch Frank 	case KVM_PV_DUMP_COMPLETE: {
24080460eb35SJanosch Frank 		if (!kvm->arch.pv.dumping)
24090460eb35SJanosch Frank 			break;
24100460eb35SJanosch Frank 
24110460eb35SJanosch Frank 		r = -EINVAL;
24120460eb35SJanosch Frank 		if (dmp.buff_len < uv_info.conf_dump_finalize_len)
24130460eb35SJanosch Frank 			break;
24140460eb35SJanosch Frank 
24150460eb35SJanosch Frank 		r = kvm_s390_pv_dump_complete(kvm, result_buff,
24160460eb35SJanosch Frank 					      &cmd->rc, &cmd->rrc);
24170460eb35SJanosch Frank 		break;
24180460eb35SJanosch Frank 	}
24190460eb35SJanosch Frank 	default:
24200460eb35SJanosch Frank 		r = -ENOTTY;
24210460eb35SJanosch Frank 		break;
24220460eb35SJanosch Frank 	}
24230460eb35SJanosch Frank 
24240460eb35SJanosch Frank 	return r;
24250460eb35SJanosch Frank }
24260460eb35SJanosch Frank 
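/*
 * Illustrative sketch only, not part of the kernel: the dump subcommands
 * above are meant to be driven in order by user space, roughly:
 *
 *	KVM_PV_DUMP_INIT		// once; blocks SIE entry
 *	KVM_PV_DUMP_CONFIG_STOR_STATE	// repeatedly, walking guest memory
 *	KVM_PV_DUMP_COMPLETE		// once, with buff_len of at least
 *					// uv_info.conf_dump_finalize_len
 *
 * each wrapped in a struct kvm_pv_cmd with cmd == KVM_PV_DUMP and data
 * pointing to a struct kvm_s390_pv_dmp that describes the user buffer.
 */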
242729b40f10SJanosch Frank static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
242829b40f10SJanosch Frank {
242929b40f10SJanosch Frank 	int r = 0;
243029b40f10SJanosch Frank 	u16 dummy;
243129b40f10SJanosch Frank 	void __user *argp = (void __user *)cmd->data;
243229b40f10SJanosch Frank 
243329b40f10SJanosch Frank 	switch (cmd->cmd) {
243429b40f10SJanosch Frank 	case KVM_PV_ENABLE: {
243529b40f10SJanosch Frank 		r = -EINVAL;
243629b40f10SJanosch Frank 		if (kvm_s390_pv_is_protected(kvm))
243729b40f10SJanosch Frank 			break;
243829b40f10SJanosch Frank 
243929b40f10SJanosch Frank 		/*
244029b40f10SJanosch Frank 		 *  FMT 4 SIE needs esca. As we never switch back to bsca from
244129b40f10SJanosch Frank 		 *  esca, we need no cleanup in the error cases below
244229b40f10SJanosch Frank 		 */
244329b40f10SJanosch Frank 		r = sca_switch_to_extended(kvm);
244429b40f10SJanosch Frank 		if (r)
244529b40f10SJanosch Frank 			break;
244629b40f10SJanosch Frank 
2447d8ed45c5SMichel Lespinasse 		mmap_write_lock(current->mm);
2448fa0c5eabSJanosch Frank 		r = gmap_mark_unmergeable();
2449d8ed45c5SMichel Lespinasse 		mmap_write_unlock(current->mm);
2450fa0c5eabSJanosch Frank 		if (r)
2451fa0c5eabSJanosch Frank 			break;
2452fa0c5eabSJanosch Frank 
245329b40f10SJanosch Frank 		r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
245429b40f10SJanosch Frank 		if (r)
245529b40f10SJanosch Frank 			break;
245629b40f10SJanosch Frank 
245729b40f10SJanosch Frank 		r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
245829b40f10SJanosch Frank 		if (r)
245929b40f10SJanosch Frank 			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
24600890ddeaSChristian Borntraeger 
24610890ddeaSChristian Borntraeger 		/* we need to block service interrupts from now on */
24620890ddeaSChristian Borntraeger 		set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
246329b40f10SJanosch Frank 		break;
246429b40f10SJanosch Frank 	}
246529b40f10SJanosch Frank 	case KVM_PV_DISABLE: {
246629b40f10SJanosch Frank 		r = -EINVAL;
246729b40f10SJanosch Frank 		if (!kvm_s390_pv_is_protected(kvm))
246829b40f10SJanosch Frank 			break;
246929b40f10SJanosch Frank 
247029b40f10SJanosch Frank 		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
247129b40f10SJanosch Frank 		/*
247229b40f10SJanosch Frank 		 * If a CPU could not be destroyed, destroy VM will also fail.
247329b40f10SJanosch Frank 		 * There is no point in trying to destroy it. Instead return
247429b40f10SJanosch Frank 		 * the rc and rrc from the first CPU that failed destroying.
247529b40f10SJanosch Frank 		 */
247629b40f10SJanosch Frank 		if (r)
247729b40f10SJanosch Frank 			break;
247829b40f10SJanosch Frank 		r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc);
24790890ddeaSChristian Borntraeger 
24800890ddeaSChristian Borntraeger 		/* no need to block service interrupts any more */
24810890ddeaSChristian Borntraeger 		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
248229b40f10SJanosch Frank 		break;
248329b40f10SJanosch Frank 	}
248429b40f10SJanosch Frank 	case KVM_PV_SET_SEC_PARMS: {
248529b40f10SJanosch Frank 		struct kvm_s390_pv_sec_parm parms = {};
248629b40f10SJanosch Frank 		void *hdr;
248729b40f10SJanosch Frank 
248829b40f10SJanosch Frank 		r = -EINVAL;
248929b40f10SJanosch Frank 		if (!kvm_s390_pv_is_protected(kvm))
249029b40f10SJanosch Frank 			break;
249129b40f10SJanosch Frank 
249229b40f10SJanosch Frank 		r = -EFAULT;
249329b40f10SJanosch Frank 		if (copy_from_user(&parms, argp, sizeof(parms)))
249429b40f10SJanosch Frank 			break;
249529b40f10SJanosch Frank 
249629b40f10SJanosch Frank 		/* Currently restricted to 8KB */
249729b40f10SJanosch Frank 		r = -EINVAL;
249829b40f10SJanosch Frank 		if (parms.length > PAGE_SIZE * 2)
249929b40f10SJanosch Frank 			break;
250029b40f10SJanosch Frank 
250129b40f10SJanosch Frank 		r = -ENOMEM;
250229b40f10SJanosch Frank 		hdr = vmalloc(parms.length);
250329b40f10SJanosch Frank 		if (!hdr)
250429b40f10SJanosch Frank 			break;
250529b40f10SJanosch Frank 
250629b40f10SJanosch Frank 		r = -EFAULT;
250729b40f10SJanosch Frank 		if (!copy_from_user(hdr, (void __user *)parms.origin,
250829b40f10SJanosch Frank 				    parms.length))
250929b40f10SJanosch Frank 			r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
251029b40f10SJanosch Frank 						      &cmd->rc, &cmd->rrc);
251129b40f10SJanosch Frank 
251229b40f10SJanosch Frank 		vfree(hdr);
251329b40f10SJanosch Frank 		break;
251429b40f10SJanosch Frank 	}
251529b40f10SJanosch Frank 	case KVM_PV_UNPACK: {
251629b40f10SJanosch Frank 		struct kvm_s390_pv_unp unp = {};
251729b40f10SJanosch Frank 
251829b40f10SJanosch Frank 		r = -EINVAL;
25191ed576a2SJanosch Frank 		if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
252029b40f10SJanosch Frank 			break;
252129b40f10SJanosch Frank 
252229b40f10SJanosch Frank 		r = -EFAULT;
252329b40f10SJanosch Frank 		if (copy_from_user(&unp, argp, sizeof(unp)))
252429b40f10SJanosch Frank 			break;
252529b40f10SJanosch Frank 
252629b40f10SJanosch Frank 		r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
252729b40f10SJanosch Frank 				       &cmd->rc, &cmd->rrc);
252829b40f10SJanosch Frank 		break;
252929b40f10SJanosch Frank 	}
253029b40f10SJanosch Frank 	case KVM_PV_VERIFY: {
253129b40f10SJanosch Frank 		r = -EINVAL;
253229b40f10SJanosch Frank 		if (!kvm_s390_pv_is_protected(kvm))
253329b40f10SJanosch Frank 			break;
253429b40f10SJanosch Frank 
253529b40f10SJanosch Frank 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
253629b40f10SJanosch Frank 				  UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
253729b40f10SJanosch Frank 		KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
253829b40f10SJanosch Frank 			     cmd->rrc);
253929b40f10SJanosch Frank 		break;
254029b40f10SJanosch Frank 	}
2541e0d2773dSJanosch Frank 	case KVM_PV_PREP_RESET: {
2542e0d2773dSJanosch Frank 		r = -EINVAL;
2543e0d2773dSJanosch Frank 		if (!kvm_s390_pv_is_protected(kvm))
2544e0d2773dSJanosch Frank 			break;
2545e0d2773dSJanosch Frank 
2546e0d2773dSJanosch Frank 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2547e0d2773dSJanosch Frank 				  UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
2548e0d2773dSJanosch Frank 		KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
2549e0d2773dSJanosch Frank 			     cmd->rc, cmd->rrc);
2550e0d2773dSJanosch Frank 		break;
2551e0d2773dSJanosch Frank 	}
2552e0d2773dSJanosch Frank 	case KVM_PV_UNSHARE_ALL: {
2553e0d2773dSJanosch Frank 		r = -EINVAL;
2554e0d2773dSJanosch Frank 		if (!kvm_s390_pv_is_protected(kvm))
2555e0d2773dSJanosch Frank 			break;
2556e0d2773dSJanosch Frank 
2557e0d2773dSJanosch Frank 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2558e0d2773dSJanosch Frank 				  UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
2559e0d2773dSJanosch Frank 		KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
2560e0d2773dSJanosch Frank 			     cmd->rc, cmd->rrc);
2561e0d2773dSJanosch Frank 		break;
2562e0d2773dSJanosch Frank 	}
256335d02493SJanosch Frank 	case KVM_PV_INFO: {
256435d02493SJanosch Frank 		struct kvm_s390_pv_info info = {};
256535d02493SJanosch Frank 		ssize_t data_len;
256635d02493SJanosch Frank 
256735d02493SJanosch Frank 		/*
256835d02493SJanosch Frank 		 * No need to check the VM protection here.
256935d02493SJanosch Frank 		 *
257035d02493SJanosch Frank 		 * Maybe user space wants to query some of the data
257135d02493SJanosch Frank 		 * when the VM is still unprotected. If we see the
257235d02493SJanosch Frank 		 * need to fence a new data command we can still
257335d02493SJanosch Frank 		 * return an error in the info handler.
257435d02493SJanosch Frank 		 */
257535d02493SJanosch Frank 
257635d02493SJanosch Frank 		r = -EFAULT;
257735d02493SJanosch Frank 		if (copy_from_user(&info, argp, sizeof(info.header)))
257835d02493SJanosch Frank 			break;
257935d02493SJanosch Frank 
258035d02493SJanosch Frank 		r = -EINVAL;
258135d02493SJanosch Frank 		if (info.header.len_max < sizeof(info.header))
258235d02493SJanosch Frank 			break;
258335d02493SJanosch Frank 
258435d02493SJanosch Frank 		data_len = kvm_s390_handle_pv_info(&info);
258535d02493SJanosch Frank 		if (data_len < 0) {
258635d02493SJanosch Frank 			r = data_len;
258735d02493SJanosch Frank 			break;
258835d02493SJanosch Frank 		}
258935d02493SJanosch Frank 		/*
259035d02493SJanosch Frank 		 * If a data command struct is extended (multiple
259135d02493SJanosch Frank 		 * times) this can be used to determine how much of it
259235d02493SJanosch Frank 		 * is valid.
259335d02493SJanosch Frank 		 */
259435d02493SJanosch Frank 		info.header.len_written = data_len;
259535d02493SJanosch Frank 
259635d02493SJanosch Frank 		r = -EFAULT;
259735d02493SJanosch Frank 		if (copy_to_user(argp, &info, data_len))
259835d02493SJanosch Frank 			break;
259935d02493SJanosch Frank 
260035d02493SJanosch Frank 		r = 0;
260135d02493SJanosch Frank 		break;
260235d02493SJanosch Frank 	}
26030460eb35SJanosch Frank 	case KVM_PV_DUMP: {
26040460eb35SJanosch Frank 		struct kvm_s390_pv_dmp dmp;
26050460eb35SJanosch Frank 
26060460eb35SJanosch Frank 		r = -EINVAL;
26070460eb35SJanosch Frank 		if (!kvm_s390_pv_is_protected(kvm))
26080460eb35SJanosch Frank 			break;
26090460eb35SJanosch Frank 
26100460eb35SJanosch Frank 		r = -EFAULT;
26110460eb35SJanosch Frank 		if (copy_from_user(&dmp, argp, sizeof(dmp)))
26120460eb35SJanosch Frank 			break;
26130460eb35SJanosch Frank 
26140460eb35SJanosch Frank 		r = kvm_s390_pv_dmp(kvm, cmd, dmp);
26150460eb35SJanosch Frank 		if (r)
26160460eb35SJanosch Frank 			break;
26170460eb35SJanosch Frank 
26180460eb35SJanosch Frank 		if (copy_to_user(argp, &dmp, sizeof(dmp))) {
26190460eb35SJanosch Frank 			r = -EFAULT;
26200460eb35SJanosch Frank 			break;
26210460eb35SJanosch Frank 		}
26220460eb35SJanosch Frank 
26230460eb35SJanosch Frank 		break;
26240460eb35SJanosch Frank 	}
262529b40f10SJanosch Frank 	default:
262629b40f10SJanosch Frank 		r = -ENOTTY;
262729b40f10SJanosch Frank 	}
262829b40f10SJanosch Frank 	return r;
262929b40f10SJanosch Frank }
263029b40f10SJanosch Frank 
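/*
 * Illustrative sketch only, not part of the kernel: converting a VM to
 * protected from user space boils down to issuing the command handled
 * above (error handling omitted):
 *
 *	struct kvm_pv_cmd cmd = { .cmd = KVM_PV_ENABLE };
 *
 *	if (ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd))
 *		fprintf(stderr, "rc %x rrc %x\n", cmd.rc, cmd.rrc);
 *
 * On failure, cmd.rc and cmd.rrc carry the ultravisor return codes filled
 * in by the handlers above.
 */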
2631e9e9feebSJanis Schoetterl-Glausch static bool access_key_invalid(u8 access_key)
2632e9e9feebSJanis Schoetterl-Glausch {
2633e9e9feebSJanis Schoetterl-Glausch 	return access_key > 0xf;
2634e9e9feebSJanis Schoetterl-Glausch }
2635e9e9feebSJanis Schoetterl-Glausch 
2636ef11c946SJanis Schoetterl-Glausch static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2637ef11c946SJanis Schoetterl-Glausch {
2638ef11c946SJanis Schoetterl-Glausch 	void __user *uaddr = (void __user *)mop->buf;
2639ef11c946SJanis Schoetterl-Glausch 	u64 supported_flags;
2640ef11c946SJanis Schoetterl-Glausch 	void *tmpbuf = NULL;
2641ef11c946SJanis Schoetterl-Glausch 	int r, srcu_idx;
2642ef11c946SJanis Schoetterl-Glausch 
2643ef11c946SJanis Schoetterl-Glausch 	supported_flags = KVM_S390_MEMOP_F_SKEY_PROTECTION
2644ef11c946SJanis Schoetterl-Glausch 			  | KVM_S390_MEMOP_F_CHECK_ONLY;
26453d9042f8SJanis Schoetterl-Glausch 	if (mop->flags & ~supported_flags || !mop->size)
2646ef11c946SJanis Schoetterl-Glausch 		return -EINVAL;
2647ef11c946SJanis Schoetterl-Glausch 	if (mop->size > MEM_OP_MAX_SIZE)
2648ef11c946SJanis Schoetterl-Glausch 		return -E2BIG;
2649b5d12744SJanis Schoetterl-Glausch 	/*
2650b5d12744SJanis Schoetterl-Glausch 	 * This is technically a heuristic only; if the kvm->lock is not
2651b5d12744SJanis Schoetterl-Glausch 	 * taken, it is not guaranteed that the VM is/remains non-protected.
2652b5d12744SJanis Schoetterl-Glausch 	 * This is ok from a kernel perspective: wrongdoing is detected
2653b5d12744SJanis Schoetterl-Glausch 	 * on the access, -EFAULT is returned and the VM may crash the
2654b5d12744SJanis Schoetterl-Glausch 	 * next time it accesses the memory in question.
2655b5d12744SJanis Schoetterl-Glausch 	 * There is no sane use case for doing switching and a memop on two
2656b5d12744SJanis Schoetterl-Glausch 	 * different CPUs at the same time.
2657b5d12744SJanis Schoetterl-Glausch 	 */
2658b5d12744SJanis Schoetterl-Glausch 	if (kvm_s390_pv_get_handle(kvm))
2659ef11c946SJanis Schoetterl-Glausch 		return -EINVAL;
2660ef11c946SJanis Schoetterl-Glausch 	if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) {
2661ef11c946SJanis Schoetterl-Glausch 		if (access_key_invalid(mop->key))
2662ef11c946SJanis Schoetterl-Glausch 			return -EINVAL;
2663ef11c946SJanis Schoetterl-Glausch 	} else {
2664ef11c946SJanis Schoetterl-Glausch 		mop->key = 0;
2665ef11c946SJanis Schoetterl-Glausch 	}
2666ef11c946SJanis Schoetterl-Glausch 	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2667ef11c946SJanis Schoetterl-Glausch 		tmpbuf = vmalloc(mop->size);
2668ef11c946SJanis Schoetterl-Glausch 		if (!tmpbuf)
2669ef11c946SJanis Schoetterl-Glausch 			return -ENOMEM;
2670ef11c946SJanis Schoetterl-Glausch 	}
2671ef11c946SJanis Schoetterl-Glausch 
2672ef11c946SJanis Schoetterl-Glausch 	srcu_idx = srcu_read_lock(&kvm->srcu);
2673ef11c946SJanis Schoetterl-Glausch 
2674ef11c946SJanis Schoetterl-Glausch 	if (kvm_is_error_gpa(kvm, mop->gaddr)) {
2675ef11c946SJanis Schoetterl-Glausch 		r = PGM_ADDRESSING;
2676ef11c946SJanis Schoetterl-Glausch 		goto out_unlock;
2677ef11c946SJanis Schoetterl-Glausch 	}
2678ef11c946SJanis Schoetterl-Glausch 
2679ef11c946SJanis Schoetterl-Glausch 	switch (mop->op) {
2680ef11c946SJanis Schoetterl-Glausch 	case KVM_S390_MEMOP_ABSOLUTE_READ: {
2681ef11c946SJanis Schoetterl-Glausch 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2682ef11c946SJanis Schoetterl-Glausch 			r = check_gpa_range(kvm, mop->gaddr, mop->size, GACC_FETCH, mop->key);
2683ef11c946SJanis Schoetterl-Glausch 		} else {
2684ef11c946SJanis Schoetterl-Glausch 			r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
2685ef11c946SJanis Schoetterl-Glausch 						      mop->size, GACC_FETCH, mop->key);
2686ef11c946SJanis Schoetterl-Glausch 			if (r == 0) {
2687ef11c946SJanis Schoetterl-Glausch 				if (copy_to_user(uaddr, tmpbuf, mop->size))
2688ef11c946SJanis Schoetterl-Glausch 					r = -EFAULT;
2689ef11c946SJanis Schoetterl-Glausch 			}
2690ef11c946SJanis Schoetterl-Glausch 		}
2691ef11c946SJanis Schoetterl-Glausch 		break;
2692ef11c946SJanis Schoetterl-Glausch 	}
2693ef11c946SJanis Schoetterl-Glausch 	case KVM_S390_MEMOP_ABSOLUTE_WRITE: {
2694ef11c946SJanis Schoetterl-Glausch 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2695ef11c946SJanis Schoetterl-Glausch 			r = check_gpa_range(kvm, mop->gaddr, mop->size, GACC_STORE, mop->key);
2696ef11c946SJanis Schoetterl-Glausch 		} else {
2697ef11c946SJanis Schoetterl-Glausch 			if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2698ef11c946SJanis Schoetterl-Glausch 				r = -EFAULT;
2699ef11c946SJanis Schoetterl-Glausch 				break;
2700ef11c946SJanis Schoetterl-Glausch 			}
2701ef11c946SJanis Schoetterl-Glausch 			r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
2702ef11c946SJanis Schoetterl-Glausch 						      mop->size, GACC_STORE, mop->key);
2703ef11c946SJanis Schoetterl-Glausch 		}
2704ef11c946SJanis Schoetterl-Glausch 		break;
2705ef11c946SJanis Schoetterl-Glausch 	}
2706ef11c946SJanis Schoetterl-Glausch 	default:
2707ef11c946SJanis Schoetterl-Glausch 		r = -EINVAL;
2708ef11c946SJanis Schoetterl-Glausch 	}
2709ef11c946SJanis Schoetterl-Glausch 
2710ef11c946SJanis Schoetterl-Glausch out_unlock:
2711ef11c946SJanis Schoetterl-Glausch 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2712ef11c946SJanis Schoetterl-Glausch 
2713ef11c946SJanis Schoetterl-Glausch 	vfree(tmpbuf);
2714ef11c946SJanis Schoetterl-Glausch 	return r;
2715ef11c946SJanis Schoetterl-Glausch }
2716ef11c946SJanis Schoetterl-Glausch 
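/*
 * Illustrative sketch only, not part of the kernel: an absolute read via the
 * VM-scoped memop handled above. guest_abs_addr is a made-up variable, the
 * layout is the uapi struct kvm_s390_mem_op, error handling is omitted:
 *
 *	uint8_t buf[256];
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = guest_abs_addr,
 *		.size = sizeof(buf),
 *		.op = KVM_S390_MEMOP_ABSOLUTE_READ,
 *		.buf = (uint64_t)(uintptr_t)buf,
 *		.flags = KVM_S390_MEMOP_F_SKEY_PROTECTION,
 *		.key = 3,	// storage access key to check against
 *	};
 *
 *	ioctl(vm_fd, KVM_S390_MEM_OP, &op);
 */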
2717b0c632dbSHeiko Carstens long kvm_arch_vm_ioctl(struct file *filp,
2718b0c632dbSHeiko Carstens 		       unsigned int ioctl, unsigned long arg)
2719b0c632dbSHeiko Carstens {
2720b0c632dbSHeiko Carstens 	struct kvm *kvm = filp->private_data;
2721b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
2722f2061656SDominik Dingel 	struct kvm_device_attr attr;
2723b0c632dbSHeiko Carstens 	int r;
2724b0c632dbSHeiko Carstens 
2725b0c632dbSHeiko Carstens 	switch (ioctl) {
2726ba5c1e9bSCarsten Otte 	case KVM_S390_INTERRUPT: {
2727ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
2728ba5c1e9bSCarsten Otte 
2729ba5c1e9bSCarsten Otte 		r = -EFAULT;
2730ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
2731ba5c1e9bSCarsten Otte 			break;
2732ba5c1e9bSCarsten Otte 		r = kvm_s390_inject_vm(kvm, &s390int);
2733ba5c1e9bSCarsten Otte 		break;
2734ba5c1e9bSCarsten Otte 	}
273584223598SCornelia Huck 	case KVM_CREATE_IRQCHIP: {
273684223598SCornelia Huck 		struct kvm_irq_routing_entry routing;
273784223598SCornelia Huck 
273884223598SCornelia Huck 		r = -EINVAL;
273984223598SCornelia Huck 		if (kvm->arch.use_irqchip) {
274084223598SCornelia Huck 			/* Set up dummy routing. */
274184223598SCornelia Huck 			memset(&routing, 0, sizeof(routing));
2742152b2839SNicholas Krause 			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
274384223598SCornelia Huck 		}
274484223598SCornelia Huck 		break;
274584223598SCornelia Huck 	}
2746f2061656SDominik Dingel 	case KVM_SET_DEVICE_ATTR: {
2747f2061656SDominik Dingel 		r = -EFAULT;
2748f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2749f2061656SDominik Dingel 			break;
2750f2061656SDominik Dingel 		r = kvm_s390_vm_set_attr(kvm, &attr);
2751f2061656SDominik Dingel 		break;
2752f2061656SDominik Dingel 	}
2753f2061656SDominik Dingel 	case KVM_GET_DEVICE_ATTR: {
2754f2061656SDominik Dingel 		r = -EFAULT;
2755f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2756f2061656SDominik Dingel 			break;
2757f2061656SDominik Dingel 		r = kvm_s390_vm_get_attr(kvm, &attr);
2758f2061656SDominik Dingel 		break;
2759f2061656SDominik Dingel 	}
2760f2061656SDominik Dingel 	case KVM_HAS_DEVICE_ATTR: {
2761f2061656SDominik Dingel 		r = -EFAULT;
2762f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2763f2061656SDominik Dingel 			break;
2764f2061656SDominik Dingel 		r = kvm_s390_vm_has_attr(kvm, &attr);
2765f2061656SDominik Dingel 		break;
2766f2061656SDominik Dingel 	}
276730ee2a98SJason J. Herne 	case KVM_S390_GET_SKEYS: {
276830ee2a98SJason J. Herne 		struct kvm_s390_skeys args;
276930ee2a98SJason J. Herne 
277030ee2a98SJason J. Herne 		r = -EFAULT;
277130ee2a98SJason J. Herne 		if (copy_from_user(&args, argp,
277230ee2a98SJason J. Herne 				   sizeof(struct kvm_s390_skeys)))
277330ee2a98SJason J. Herne 			break;
277430ee2a98SJason J. Herne 		r = kvm_s390_get_skeys(kvm, &args);
277530ee2a98SJason J. Herne 		break;
277630ee2a98SJason J. Herne 	}
277730ee2a98SJason J. Herne 	case KVM_S390_SET_SKEYS: {
277830ee2a98SJason J. Herne 		struct kvm_s390_skeys args;
277930ee2a98SJason J. Herne 
278030ee2a98SJason J. Herne 		r = -EFAULT;
278130ee2a98SJason J. Herne 		if (copy_from_user(&args, argp,
278230ee2a98SJason J. Herne 				   sizeof(struct kvm_s390_skeys)))
278330ee2a98SJason J. Herne 			break;
278430ee2a98SJason J. Herne 		r = kvm_s390_set_skeys(kvm, &args);
278530ee2a98SJason J. Herne 		break;
278630ee2a98SJason J. Herne 	}
27874036e387SClaudio Imbrenda 	case KVM_S390_GET_CMMA_BITS: {
27884036e387SClaudio Imbrenda 		struct kvm_s390_cmma_log args;
27894036e387SClaudio Imbrenda 
27904036e387SClaudio Imbrenda 		r = -EFAULT;
27914036e387SClaudio Imbrenda 		if (copy_from_user(&args, argp, sizeof(args)))
27924036e387SClaudio Imbrenda 			break;
27931de1ea7eSChristian Borntraeger 		mutex_lock(&kvm->slots_lock);
27944036e387SClaudio Imbrenda 		r = kvm_s390_get_cmma_bits(kvm, &args);
27951de1ea7eSChristian Borntraeger 		mutex_unlock(&kvm->slots_lock);
27964036e387SClaudio Imbrenda 		if (!r) {
27974036e387SClaudio Imbrenda 			r = copy_to_user(argp, &args, sizeof(args));
27984036e387SClaudio Imbrenda 			if (r)
27994036e387SClaudio Imbrenda 				r = -EFAULT;
28004036e387SClaudio Imbrenda 		}
28014036e387SClaudio Imbrenda 		break;
28024036e387SClaudio Imbrenda 	}
28034036e387SClaudio Imbrenda 	case KVM_S390_SET_CMMA_BITS: {
28044036e387SClaudio Imbrenda 		struct kvm_s390_cmma_log args;
28054036e387SClaudio Imbrenda 
28064036e387SClaudio Imbrenda 		r = -EFAULT;
28074036e387SClaudio Imbrenda 		if (copy_from_user(&args, argp, sizeof(args)))
28084036e387SClaudio Imbrenda 			break;
28091de1ea7eSChristian Borntraeger 		mutex_lock(&kvm->slots_lock);
28104036e387SClaudio Imbrenda 		r = kvm_s390_set_cmma_bits(kvm, &args);
28111de1ea7eSChristian Borntraeger 		mutex_unlock(&kvm->slots_lock);
28124036e387SClaudio Imbrenda 		break;
28134036e387SClaudio Imbrenda 	}
281429b40f10SJanosch Frank 	case KVM_S390_PV_COMMAND: {
281529b40f10SJanosch Frank 		struct kvm_pv_cmd args;
281629b40f10SJanosch Frank 
281767cf68b6SEric Farman 		/* protvirt means user cpu state */
281867cf68b6SEric Farman 		kvm_s390_set_user_cpu_state_ctrl(kvm);
281929b40f10SJanosch Frank 		r = 0;
282029b40f10SJanosch Frank 		if (!is_prot_virt_host()) {
282129b40f10SJanosch Frank 			r = -EINVAL;
282229b40f10SJanosch Frank 			break;
282329b40f10SJanosch Frank 		}
282429b40f10SJanosch Frank 		if (copy_from_user(&args, argp, sizeof(args))) {
282529b40f10SJanosch Frank 			r = -EFAULT;
282629b40f10SJanosch Frank 			break;
282729b40f10SJanosch Frank 		}
282829b40f10SJanosch Frank 		if (args.flags) {
282929b40f10SJanosch Frank 			r = -EINVAL;
283029b40f10SJanosch Frank 			break;
283129b40f10SJanosch Frank 		}
283229b40f10SJanosch Frank 		mutex_lock(&kvm->lock);
283329b40f10SJanosch Frank 		r = kvm_s390_handle_pv(kvm, &args);
283429b40f10SJanosch Frank 		mutex_unlock(&kvm->lock);
283529b40f10SJanosch Frank 		if (copy_to_user(argp, &args, sizeof(args))) {
283629b40f10SJanosch Frank 			r = -EFAULT;
283729b40f10SJanosch Frank 			break;
283829b40f10SJanosch Frank 		}
283929b40f10SJanosch Frank 		break;
284029b40f10SJanosch Frank 	}
2841ef11c946SJanis Schoetterl-Glausch 	case KVM_S390_MEM_OP: {
2842ef11c946SJanis Schoetterl-Glausch 		struct kvm_s390_mem_op mem_op;
2843ef11c946SJanis Schoetterl-Glausch 
2844ef11c946SJanis Schoetterl-Glausch 		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
2845ef11c946SJanis Schoetterl-Glausch 			r = kvm_s390_vm_mem_op(kvm, &mem_op);
2846ef11c946SJanis Schoetterl-Glausch 		else
2847ef11c946SJanis Schoetterl-Glausch 			r = -EFAULT;
2848ef11c946SJanis Schoetterl-Glausch 		break;
2849ef11c946SJanis Schoetterl-Glausch 	}
2850db1c875eSMatthew Rosato 	case KVM_S390_ZPCI_OP: {
2851db1c875eSMatthew Rosato 		struct kvm_s390_zpci_op args;
2852db1c875eSMatthew Rosato 
2853db1c875eSMatthew Rosato 		r = -EINVAL;
2854db1c875eSMatthew Rosato 		if (!IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
2855db1c875eSMatthew Rosato 			break;
2856db1c875eSMatthew Rosato 		if (copy_from_user(&args, argp, sizeof(args))) {
2857db1c875eSMatthew Rosato 			r = -EFAULT;
2858db1c875eSMatthew Rosato 			break;
2859db1c875eSMatthew Rosato 		}
2860db1c875eSMatthew Rosato 		r = kvm_s390_pci_zpci_op(kvm, &args);
2861db1c875eSMatthew Rosato 		break;
2862db1c875eSMatthew Rosato 	}
2863b0c632dbSHeiko Carstens 	default:
2864367e1319SAvi Kivity 		r = -ENOTTY;
2865b0c632dbSHeiko Carstens 	}
2866b0c632dbSHeiko Carstens 
2867b0c632dbSHeiko Carstens 	return r;
2868b0c632dbSHeiko Carstens }
2869b0c632dbSHeiko Carstens 
287045c9b47cSTony Krowiak static int kvm_s390_apxa_installed(void)
287145c9b47cSTony Krowiak {
2872e585b24aSTony Krowiak 	struct ap_config_info info;
287345c9b47cSTony Krowiak 
2874e585b24aSTony Krowiak 	if (ap_instructions_available()) {
2875e585b24aSTony Krowiak 		if (ap_qci(&info) == 0)
2876e585b24aSTony Krowiak 			return info.apxa;
287745c9b47cSTony Krowiak 	}
287845c9b47cSTony Krowiak 
287945c9b47cSTony Krowiak 	return 0;
288045c9b47cSTony Krowiak }
288145c9b47cSTony Krowiak 
2882e585b24aSTony Krowiak /*
2883e585b24aSTony Krowiak  * The format of the crypto control block (CRYCB) is specified in the 3 low
2884e585b24aSTony Krowiak  * order bits of the CRYCB designation (CRYCBD) field as follows:
2885e585b24aSTony Krowiak  * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
2886e585b24aSTony Krowiak  *	     AP extended addressing (APXA) facility are installed.
2887e585b24aSTony Krowiak  * Format 1: The APXA facility is not installed but the MSAX3 facility is.
2888e585b24aSTony Krowiak  * Format 2: Both the APXA and MSAX3 facilities are installed.
2889e585b24aSTony Krowiak  */
289045c9b47cSTony Krowiak static void kvm_s390_set_crycb_format(struct kvm *kvm)
289145c9b47cSTony Krowiak {
289245c9b47cSTony Krowiak 	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
289345c9b47cSTony Krowiak 
2894e585b24aSTony Krowiak 	/* Clear the CRYCB format bits - i.e., set format 0 by default */
2895e585b24aSTony Krowiak 	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
2896e585b24aSTony Krowiak 
2897e585b24aSTony Krowiak 	/* Check whether MSAX3 is installed */
2898e585b24aSTony Krowiak 	if (!test_kvm_facility(kvm, 76))
2899e585b24aSTony Krowiak 		return;
2900e585b24aSTony Krowiak 
290145c9b47cSTony Krowiak 	if (kvm_s390_apxa_installed())
290245c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
290345c9b47cSTony Krowiak 	else
290445c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
290545c9b47cSTony Krowiak }
290645c9b47cSTony Krowiak 
290786956e70STony Krowiak /*
290886956e70STony Krowiak  * kvm_arch_crypto_set_masks
290986956e70STony Krowiak  *
291086956e70STony Krowiak  * @kvm: pointer to the target guest's KVM struct containing the crypto masks
291186956e70STony Krowiak  *	 to be set.
291286956e70STony Krowiak  * @apm: the mask identifying the accessible AP adapters
291386956e70STony Krowiak  * @aqm: the mask identifying the accessible AP domains
291486956e70STony Krowiak  * @adm: the mask identifying the accessible AP control domains
291586956e70STony Krowiak  *
291686956e70STony Krowiak  * Set the masks that identify the adapters, domains and control domains to
291786956e70STony Krowiak  * which the KVM guest is granted access.
291886956e70STony Krowiak  *
291986956e70STony Krowiak  * Note: The kvm->lock mutex must be locked by the caller before invoking this
292086956e70STony Krowiak  *	 function.
292186956e70STony Krowiak  */
29220e237e44SPierre Morel void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
29230e237e44SPierre Morel 			       unsigned long *aqm, unsigned long *adm)
29240e237e44SPierre Morel {
29250e237e44SPierre Morel 	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
29260e237e44SPierre Morel 
29270e237e44SPierre Morel 	kvm_s390_vcpu_block_all(kvm);
29280e237e44SPierre Morel 
29290e237e44SPierre Morel 	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
29300e237e44SPierre Morel 	case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
29310e237e44SPierre Morel 		memcpy(crycb->apcb1.apm, apm, 32);
29320e237e44SPierre Morel 		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
29330e237e44SPierre Morel 			 apm[0], apm[1], apm[2], apm[3]);
29340e237e44SPierre Morel 		memcpy(crycb->apcb1.aqm, aqm, 32);
29350e237e44SPierre Morel 		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
29360e237e44SPierre Morel 			 aqm[0], aqm[1], aqm[2], aqm[3]);
29370e237e44SPierre Morel 		memcpy(crycb->apcb1.adm, adm, 32);
29380e237e44SPierre Morel 		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
29390e237e44SPierre Morel 			 adm[0], adm[1], adm[2], adm[3]);
29400e237e44SPierre Morel 		break;
29410e237e44SPierre Morel 	case CRYCB_FORMAT1:
29420e237e44SPierre Morel 	case CRYCB_FORMAT0: /* Fall through, both use APCB0 */
29430e237e44SPierre Morel 		memcpy(crycb->apcb0.apm, apm, 8);
29440e237e44SPierre Morel 		memcpy(crycb->apcb0.aqm, aqm, 2);
29450e237e44SPierre Morel 		memcpy(crycb->apcb0.adm, adm, 2);
29460e237e44SPierre Morel 		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
29470e237e44SPierre Morel 			 apm[0], *((unsigned short *)aqm),
29480e237e44SPierre Morel 			 *((unsigned short *)adm));
29490e237e44SPierre Morel 		break;
29500e237e44SPierre Morel 	default:	/* Cannot happen */
29510e237e44SPierre Morel 		break;
29520e237e44SPierre Morel 	}
29530e237e44SPierre Morel 
29540e237e44SPierre Morel 	/* recreate the shadow crycb for each vcpu */
29550e237e44SPierre Morel 	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
29560e237e44SPierre Morel 	kvm_s390_vcpu_unblock_all(kvm);
29570e237e44SPierre Morel }
29580e237e44SPierre Morel EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
29590e237e44SPierre Morel 
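/*
 * Illustrative sketch only, not part of the kernel: a driver granting a
 * guest access to AP queues would call the helper above with kvm->lock
 * held, along the lines of:
 *
 *	mutex_lock(&kvm->lock);
 *	kvm_arch_crypto_set_masks(kvm, apm, aqm, adm);
 *	mutex_unlock(&kvm->lock);
 *
 * where apm, aqm and adm are bitmasks owned by the caller, as described in
 * the kernel-doc above.
 */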
296086956e70STony Krowiak /*
296186956e70STony Krowiak  * kvm_arch_crypto_clear_masks
296286956e70STony Krowiak  *
296386956e70STony Krowiak  * @kvm: pointer to the target guest's KVM struct containing the crypto masks
296486956e70STony Krowiak  *	 to be cleared.
296586956e70STony Krowiak  *
296686956e70STony Krowiak  * Clear the masks that identify the adapters, domains and control domains to
296786956e70STony Krowiak  * which the KVM guest is granted access.
296886956e70STony Krowiak  *
296986956e70STony Krowiak  * Note: The kvm->lock mutex must be locked by the caller before invoking this
297086956e70STony Krowiak  *	 function.
297186956e70STony Krowiak  */
297242104598STony Krowiak void kvm_arch_crypto_clear_masks(struct kvm *kvm)
297342104598STony Krowiak {
297442104598STony Krowiak 	kvm_s390_vcpu_block_all(kvm);
297542104598STony Krowiak 
297642104598STony Krowiak 	memset(&kvm->arch.crypto.crycb->apcb0, 0,
297742104598STony Krowiak 	       sizeof(kvm->arch.crypto.crycb->apcb0));
297842104598STony Krowiak 	memset(&kvm->arch.crypto.crycb->apcb1, 0,
297942104598STony Krowiak 	       sizeof(kvm->arch.crypto.crycb->apcb1));
298042104598STony Krowiak 
29810e237e44SPierre Morel 	VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
29826cc571b1SPierre Morel 	/* recreate the shadow crycb for each vcpu */
29836cc571b1SPierre Morel 	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
298442104598STony Krowiak 	kvm_s390_vcpu_unblock_all(kvm);
298542104598STony Krowiak }
298642104598STony Krowiak EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
298742104598STony Krowiak 
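/*
 * Build the guest's initial CPUID from the host CPU id, forcing the
 * version byte to 0xff, the value that marks a CPU running in a
 * virtual machine.
 */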
29889bb0ec09SDavid Hildenbrand static u64 kvm_s390_get_initial_cpuid(void)
29899d8d5786SMichael Mueller {
29909bb0ec09SDavid Hildenbrand 	struct cpuid cpuid;
29919bb0ec09SDavid Hildenbrand 
29929bb0ec09SDavid Hildenbrand 	get_cpu_id(&cpuid);
29939bb0ec09SDavid Hildenbrand 	cpuid.version = 0xff;
29949bb0ec09SDavid Hildenbrand 	return *((u64 *) &cpuid);
29959d8d5786SMichael Mueller }
29969d8d5786SMichael Mueller 
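/*
 * Point the crypto control block at the area embedded in sie_page2 and
 * select its format.  If the MSAX3 facility (76) is available to the
 * guest, AES and DEA protected-key handling is enabled with freshly
 * generated random wrapping key masks.
 */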
2997c54f0d6aSDavid Hildenbrand static void kvm_s390_crypto_init(struct kvm *kvm)
29985102ee87STony Krowiak {
2999c54f0d6aSDavid Hildenbrand 	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
300045c9b47cSTony Krowiak 	kvm_s390_set_crycb_format(kvm);
30011e753732STony Krowiak 	init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem);
30025102ee87STony Krowiak 
3003e585b24aSTony Krowiak 	if (!test_kvm_facility(kvm, 76))
3004e585b24aSTony Krowiak 		return;
3005e585b24aSTony Krowiak 
3006ed6f76b4STony Krowiak 	/* Enable AES/DEA protected key functions by default */
3007ed6f76b4STony Krowiak 	kvm->arch.crypto.aes_kw = 1;
3008ed6f76b4STony Krowiak 	kvm->arch.crypto.dea_kw = 1;
3009ed6f76b4STony Krowiak 	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
3010ed6f76b4STony Krowiak 			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
3011ed6f76b4STony Krowiak 	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
3012ed6f76b4STony Krowiak 			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
30135102ee87STony Krowiak }
30145102ee87STony Krowiak 
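/* Free the VM's system control area (basic or extended) and clear the pointer. */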
30157d43bafcSEugene (jno) Dvurechenski static void sca_dispose(struct kvm *kvm)
30167d43bafcSEugene (jno) Dvurechenski {
30177d43bafcSEugene (jno) Dvurechenski 	if (kvm->arch.use_esca)
30185e044315SEugene (jno) Dvurechenski 		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
30197d43bafcSEugene (jno) Dvurechenski 	else
30207d43bafcSEugene (jno) Dvurechenski 		free_page((unsigned long)(kvm->arch.sca));
30217d43bafcSEugene (jno) Dvurechenski 	kvm->arch.sca = NULL;
30227d43bafcSEugene (jno) Dvurechenski }
30237d43bafcSEugene (jno) Dvurechenski 
302409340b2fSMatthew Rosato void kvm_arch_free_vm(struct kvm *kvm)
302509340b2fSMatthew Rosato {
302609340b2fSMatthew Rosato 	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
302709340b2fSMatthew Rosato 		kvm_s390_pci_clear_list(kvm);
302809340b2fSMatthew Rosato 
302909340b2fSMatthew Rosato 	__kvm_arch_free_vm(kvm);
303009340b2fSMatthew Rosato }
303109340b2fSMatthew Rosato 
3032e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
3033b0c632dbSHeiko Carstens {
3034c4196218SChristian Borntraeger 	gfp_t alloc_flags = GFP_KERNEL_ACCOUNT;
30359d8d5786SMichael Mueller 	int i, rc;
3036b0c632dbSHeiko Carstens 	char debug_name[16];
3037f6c137ffSChristian Borntraeger 	static unsigned long sca_offset;
3038b0c632dbSHeiko Carstens 
3039e08b9637SCarsten Otte 	rc = -EINVAL;
3040e08b9637SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
3041e08b9637SCarsten Otte 	if (type & ~KVM_VM_S390_UCONTROL)
3042e08b9637SCarsten Otte 		goto out_err;
3043e08b9637SCarsten Otte 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
3044e08b9637SCarsten Otte 		goto out_err;
3045e08b9637SCarsten Otte #else
3046e08b9637SCarsten Otte 	if (type)
3047e08b9637SCarsten Otte 		goto out_err;
3048e08b9637SCarsten Otte #endif
3049e08b9637SCarsten Otte 
3050b0c632dbSHeiko Carstens 	rc = s390_enable_sie();
3051b0c632dbSHeiko Carstens 	if (rc)
3052d89f5effSJan Kiszka 		goto out_err;
3053b0c632dbSHeiko Carstens 
3054b290411aSCarsten Otte 	rc = -ENOMEM;
3055b290411aSCarsten Otte 
305676a6dd72SDavid Hildenbrand 	if (!sclp.has_64bscao)
305776a6dd72SDavid Hildenbrand 		alloc_flags |= GFP_DMA;
30585e044315SEugene (jno) Dvurechenski 	rwlock_init(&kvm->arch.sca_lock);
30599ac96d75SDavid Hildenbrand 	/* start with basic SCA */
306076a6dd72SDavid Hildenbrand 	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
3061b0c632dbSHeiko Carstens 	if (!kvm->arch.sca)
3062d89f5effSJan Kiszka 		goto out_err;
30630d9ce162SJunaid Shahid 	mutex_lock(&kvm_lock);
3064c5c2c393SDavid Hildenbrand 	sca_offset += 16;
3065bc784cceSEugene (jno) Dvurechenski 	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
3066c5c2c393SDavid Hildenbrand 		sca_offset = 0;
3067bc784cceSEugene (jno) Dvurechenski 	kvm->arch.sca = (struct bsca_block *)
3068bc784cceSEugene (jno) Dvurechenski 			((char *) kvm->arch.sca + sca_offset);
30690d9ce162SJunaid Shahid 	mutex_unlock(&kvm_lock);
3070b0c632dbSHeiko Carstens 
3071b0c632dbSHeiko Carstens 	sprintf(debug_name, "kvm-%u", current->pid);
3072b0c632dbSHeiko Carstens 
30731cb9cf72SChristian Borntraeger 	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
3074b0c632dbSHeiko Carstens 	if (!kvm->arch.dbf)
307540f5b735SDominik Dingel 		goto out_err;
3076b0c632dbSHeiko Carstens 
307719114bebSMichael Mueller 	BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
3078c54f0d6aSDavid Hildenbrand 	kvm->arch.sie_page2 =
3079c4196218SChristian Borntraeger 	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
3080c54f0d6aSDavid Hildenbrand 	if (!kvm->arch.sie_page2)
308140f5b735SDominik Dingel 		goto out_err;
30829d8d5786SMichael Mueller 
308325c84dbaSMichael Mueller 	kvm->arch.sie_page2->kvm = kvm;
3084c54f0d6aSDavid Hildenbrand 	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
3085c3b9e3e1SChristian Borntraeger 
3086c3b9e3e1SChristian Borntraeger 	for (i = 0; i < kvm_s390_fac_size(); i++) {
308717e89e13SSven Schnelle 		kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
3088c3b9e3e1SChristian Borntraeger 					      (kvm_s390_fac_base[i] |
3089c3b9e3e1SChristian Borntraeger 					       kvm_s390_fac_ext[i]);
309017e89e13SSven Schnelle 		kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
3091c3b9e3e1SChristian Borntraeger 					      kvm_s390_fac_base[i];
3092c3b9e3e1SChristian Borntraeger 	}
3093346fa2f8SChristian Borntraeger 	kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
3094981467c9SMichael Mueller 
30951935222dSDavid Hildenbrand 	/* we are always in czam mode - even on pre-z14 machines */
30961935222dSDavid Hildenbrand 	set_kvm_facility(kvm->arch.model.fac_mask, 138);
30971935222dSDavid Hildenbrand 	set_kvm_facility(kvm->arch.model.fac_list, 138);
30981935222dSDavid Hildenbrand 	/* we emulate STHYI in kvm */
309995ca2cb5SJanosch Frank 	set_kvm_facility(kvm->arch.model.fac_mask, 74);
310095ca2cb5SJanosch Frank 	set_kvm_facility(kvm->arch.model.fac_list, 74);
31011bab1c02SClaudio Imbrenda 	if (MACHINE_HAS_TLB_GUEST) {
31021bab1c02SClaudio Imbrenda 		set_kvm_facility(kvm->arch.model.fac_mask, 147);
31031bab1c02SClaudio Imbrenda 		set_kvm_facility(kvm->arch.model.fac_list, 147);
31041bab1c02SClaudio Imbrenda 	}
310595ca2cb5SJanosch Frank 
310605f31e3bSPierre Morel 	if (css_general_characteristics.aiv && test_facility(65))
310705f31e3bSPierre Morel 		set_kvm_facility(kvm->arch.model.fac_mask, 65);
310805f31e3bSPierre Morel 
31099bb0ec09SDavid Hildenbrand 	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
311037c5f6c8SDavid Hildenbrand 	kvm->arch.model.ibc = sclp.ibc & 0x0fff;
31119d8d5786SMichael Mueller 
3112c54f0d6aSDavid Hildenbrand 	kvm_s390_crypto_init(kvm);
31135102ee87STony Krowiak 
311409340b2fSMatthew Rosato 	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
311509340b2fSMatthew Rosato 		mutex_lock(&kvm->lock);
311609340b2fSMatthew Rosato 		kvm_s390_pci_init_list(kvm);
311709340b2fSMatthew Rosato 		kvm_s390_vcpu_pci_enable_interp(kvm);
311809340b2fSMatthew Rosato 		mutex_unlock(&kvm->lock);
311909340b2fSMatthew Rosato 	}
312009340b2fSMatthew Rosato 
312151978393SFei Li 	mutex_init(&kvm->arch.float_int.ais_lock);
3122ba5c1e9bSCarsten Otte 	spin_lock_init(&kvm->arch.float_int.lock);
31236d3da241SJens Freimann 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
31246d3da241SJens Freimann 		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
31258a242234SHeiko Carstens 	init_waitqueue_head(&kvm->arch.ipte_wq);
3126a6b7e459SThomas Huth 	mutex_init(&kvm->arch.ipte_mutex);
3127ba5c1e9bSCarsten Otte 
3128b0c632dbSHeiko Carstens 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
312978f26131SChristian Borntraeger 	VM_EVENT(kvm, 3, "vm created with type %lu", type);
3130b0c632dbSHeiko Carstens 
3131e08b9637SCarsten Otte 	if (type & KVM_VM_S390_UCONTROL) {
3132e08b9637SCarsten Otte 		kvm->arch.gmap = NULL;
3133a3a92c31SDominik Dingel 		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
3134e08b9637SCarsten Otte 	} else {
313532e6b236SGuenther Hutzl 		if (sclp.hamax == U64_MAX)
3136ee71d16dSMartin Schwidefsky 			kvm->arch.mem_limit = TASK_SIZE_MAX;
313732e6b236SGuenther Hutzl 		else
3138ee71d16dSMartin Schwidefsky 			kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
313932e6b236SGuenther Hutzl 						    sclp.hamax + 1);
31406ea427bbSMartin Schwidefsky 		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
3141598841caSCarsten Otte 		if (!kvm->arch.gmap)
314240f5b735SDominik Dingel 			goto out_err;
31432c70fe44SChristian Borntraeger 		kvm->arch.gmap->private = kvm;
314424eb3a82SDominik Dingel 		kvm->arch.gmap->pfault_enabled = 0;
3145e08b9637SCarsten Otte 	}
3146fa6b7fe9SCornelia Huck 
3147c9f0a2b8SJanosch Frank 	kvm->arch.use_pfmfi = sclp.has_pfmfi;
314855531b74SJanosch Frank 	kvm->arch.use_skf = sclp.has_skey;
31498ad35755SDavid Hildenbrand 	spin_lock_init(&kvm->arch.start_stop_lock);
3150a3508fbeSDavid Hildenbrand 	kvm_s390_vsie_init(kvm);
3151cc674ef2SMichael Mueller 	if (use_gisa)
3152d7c5cb01SMichael Mueller 		kvm_s390_gisa_init(kvm);
31538335713aSChristian Borntraeger 	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
31548ad35755SDavid Hildenbrand 
3155d89f5effSJan Kiszka 	return 0;
3156d89f5effSJan Kiszka out_err:
3157c54f0d6aSDavid Hildenbrand 	free_page((unsigned long)kvm->arch.sie_page2);
315840f5b735SDominik Dingel 	debug_unregister(kvm->arch.dbf);
31597d43bafcSEugene (jno) Dvurechenski 	sca_dispose(kvm);
316078f26131SChristian Borntraeger 	KVM_EVENT(3, "creation of vm failed: %d", rc);
3161d89f5effSJan Kiszka 	return rc;
3162b0c632dbSHeiko Carstens }
3163b0c632dbSHeiko Carstens 
3164d329c035SChristian Borntraeger void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
3165d329c035SChristian Borntraeger {
316629b40f10SJanosch Frank 	u16 rc, rrc;
316729b40f10SJanosch Frank 
3168d329c035SChristian Borntraeger 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
3169ade38c31SCornelia Huck 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
317067335e63SChristian Borntraeger 	kvm_s390_clear_local_irqs(vcpu);
31713c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
3172bc784cceSEugene (jno) Dvurechenski 	if (!kvm_is_ucontrol(vcpu->kvm))
3173a6e2f683SEugene (jno) Dvurechenski 		sca_del_vcpu(vcpu);
317427e0393fSCarsten Otte 
317527e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm))
31766ea427bbSMartin Schwidefsky 		gmap_remove(vcpu->arch.gmap);
317727e0393fSCarsten Otte 
3178e6db1d61SDominik Dingel 	if (vcpu->kvm->arch.use_cmma)
3179b31605c1SDominik Dingel 		kvm_s390_vcpu_unsetup_cmma(vcpu);
318029b40f10SJanosch Frank 	/* We cannot hold the vcpu mutex here; we are already dying */
318129b40f10SJanosch Frank 	if (kvm_s390_pv_cpu_get_handle(vcpu))
318229b40f10SJanosch Frank 		kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
3183d329c035SChristian Borntraeger 	free_page((unsigned long)(vcpu->arch.sie_block));
3184d329c035SChristian Borntraeger }
3185d329c035SChristian Borntraeger 
3186b0c632dbSHeiko Carstens void kvm_arch_destroy_vm(struct kvm *kvm)
3187b0c632dbSHeiko Carstens {
318829b40f10SJanosch Frank 	u16 rc, rrc;
318929b40f10SJanosch Frank 
319027592ae8SMarc Zyngier 	kvm_destroy_vcpus(kvm);
31917d43bafcSEugene (jno) Dvurechenski 	sca_dispose(kvm);
3192d7c5cb01SMichael Mueller 	kvm_s390_gisa_destroy(kvm);
319329b40f10SJanosch Frank 	/*
319429b40f10SJanosch Frank 	 * We are already at the end of life and kvm->lock is not taken.
319529b40f10SJanosch Frank 	 * This is ok as the file descriptor is closed by now and nobody
319629b40f10SJanosch Frank 	 * can mess with the pv state. To keep lockdep_assert_held from
319729b40f10SJanosch Frank 	 * complaining, we do not use kvm_s390_pv_is_protected.
319829b40f10SJanosch Frank 	 */
319929b40f10SJanosch Frank 	if (kvm_s390_pv_get_handle(kvm))
320029b40f10SJanosch Frank 		kvm_s390_pv_deinit_vm(kvm, &rc, &rrc);
320129b40f10SJanosch Frank 	debug_unregister(kvm->arch.dbf);
3202c54f0d6aSDavid Hildenbrand 	free_page((unsigned long)kvm->arch.sie_page2);
320327e0393fSCarsten Otte 	if (!kvm_is_ucontrol(kvm))
32046ea427bbSMartin Schwidefsky 		gmap_remove(kvm->arch.gmap);
3205841b91c5SCornelia Huck 	kvm_s390_destroy_adapters(kvm);
320667335e63SChristian Borntraeger 	kvm_s390_clear_float_irqs(kvm);
3207a3508fbeSDavid Hildenbrand 	kvm_s390_vsie_destroy(kvm);
32088335713aSChristian Borntraeger 	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
3209b0c632dbSHeiko Carstens }
3210b0c632dbSHeiko Carstens 
3211b0c632dbSHeiko Carstens /* Section: vcpu related */
3212dafd032aSDominik Dingel static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
3213b0c632dbSHeiko Carstens {
32146ea427bbSMartin Schwidefsky 	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
321527e0393fSCarsten Otte 	if (!vcpu->arch.gmap)
321627e0393fSCarsten Otte 		return -ENOMEM;
32172c70fe44SChristian Borntraeger 	vcpu->arch.gmap->private = vcpu->kvm;
3218dafd032aSDominik Dingel 
321927e0393fSCarsten Otte 	return 0;
322027e0393fSCarsten Otte }
322127e0393fSCarsten Otte 
3222a6e2f683SEugene (jno) Dvurechenski static void sca_del_vcpu(struct kvm_vcpu *vcpu)
3223a6e2f683SEugene (jno) Dvurechenski {
3224a6940674SDavid Hildenbrand 	if (!kvm_s390_use_sca_entries())
3225a6940674SDavid Hildenbrand 		return;
32265e044315SEugene (jno) Dvurechenski 	read_lock(&vcpu->kvm->arch.sca_lock);
32277d43bafcSEugene (jno) Dvurechenski 	if (vcpu->kvm->arch.use_esca) {
32287d43bafcSEugene (jno) Dvurechenski 		struct esca_block *sca = vcpu->kvm->arch.sca;
32297d43bafcSEugene (jno) Dvurechenski 
32307d43bafcSEugene (jno) Dvurechenski 		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
32317d43bafcSEugene (jno) Dvurechenski 		sca->cpu[vcpu->vcpu_id].sda = 0;
32327d43bafcSEugene (jno) Dvurechenski 	} else {
3233bc784cceSEugene (jno) Dvurechenski 		struct bsca_block *sca = vcpu->kvm->arch.sca;
3234a6e2f683SEugene (jno) Dvurechenski 
3235a6e2f683SEugene (jno) Dvurechenski 		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
3236a6e2f683SEugene (jno) Dvurechenski 		sca->cpu[vcpu->vcpu_id].sda = 0;
3237a6e2f683SEugene (jno) Dvurechenski 	}
32385e044315SEugene (jno) Dvurechenski 	read_unlock(&vcpu->kvm->arch.sca_lock);
32397d43bafcSEugene (jno) Dvurechenski }
3240a6e2f683SEugene (jno) Dvurechenski 
3241eaa78f34SDavid Hildenbrand static void sca_add_vcpu(struct kvm_vcpu *vcpu)
3242a6e2f683SEugene (jno) Dvurechenski {
3243a6940674SDavid Hildenbrand 	if (!kvm_s390_use_sca_entries()) {
3244a6940674SDavid Hildenbrand 		struct bsca_block *sca = vcpu->kvm->arch.sca;
3245a6940674SDavid Hildenbrand 
3246a6940674SDavid Hildenbrand 		/* we still need the basic sca for the ipte control */
3247a6940674SDavid Hildenbrand 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
3248a6940674SDavid Hildenbrand 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
3249f07afa04SDavid Hildenbrand 		return;
3250a6940674SDavid Hildenbrand 	}
3251eaa78f34SDavid Hildenbrand 	read_lock(&vcpu->kvm->arch.sca_lock);
3252eaa78f34SDavid Hildenbrand 	if (vcpu->kvm->arch.use_esca) {
3253eaa78f34SDavid Hildenbrand 		struct esca_block *sca = vcpu->kvm->arch.sca;
32547d43bafcSEugene (jno) Dvurechenski 
3255eaa78f34SDavid Hildenbrand 		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
32567d43bafcSEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
32577d43bafcSEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
32580c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
3259eaa78f34SDavid Hildenbrand 		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
32607d43bafcSEugene (jno) Dvurechenski 	} else {
3261eaa78f34SDavid Hildenbrand 		struct bsca_block *sca = vcpu->kvm->arch.sca;
3262a6e2f683SEugene (jno) Dvurechenski 
3263eaa78f34SDavid Hildenbrand 		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
3264a6e2f683SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
3265a6e2f683SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
3266eaa78f34SDavid Hildenbrand 		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
3267a6e2f683SEugene (jno) Dvurechenski 	}
3268eaa78f34SDavid Hildenbrand 	read_unlock(&vcpu->kvm->arch.sca_lock);
32695e044315SEugene (jno) Dvurechenski }
32705e044315SEugene (jno) Dvurechenski 
32715e044315SEugene (jno) Dvurechenski /* Basic SCA to Extended SCA data copy routines */
32725e044315SEugene (jno) Dvurechenski static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
32735e044315SEugene (jno) Dvurechenski {
32745e044315SEugene (jno) Dvurechenski 	d->sda = s->sda;
32755e044315SEugene (jno) Dvurechenski 	d->sigp_ctrl.c = s->sigp_ctrl.c;
32765e044315SEugene (jno) Dvurechenski 	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
32775e044315SEugene (jno) Dvurechenski }
32785e044315SEugene (jno) Dvurechenski 
32795e044315SEugene (jno) Dvurechenski static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
32805e044315SEugene (jno) Dvurechenski {
32815e044315SEugene (jno) Dvurechenski 	int i;
32825e044315SEugene (jno) Dvurechenski 
32835e044315SEugene (jno) Dvurechenski 	d->ipte_control = s->ipte_control;
32845e044315SEugene (jno) Dvurechenski 	d->mcn[0] = s->mcn;
32855e044315SEugene (jno) Dvurechenski 	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
32865e044315SEugene (jno) Dvurechenski 		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
32875e044315SEugene (jno) Dvurechenski }
32885e044315SEugene (jno) Dvurechenski 
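/*
 * Replace the basic SCA with an extended SCA: with all VCPUs blocked and
 * the sca_lock held for writing, the entries are copied, every SIE block
 * is re-pointed to the new SCA and gets ECB2_ESCA set, and the old basic
 * SCA is freed.  Returns 0 on success or if the VM already uses the ESCA.
 */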
32895e044315SEugene (jno) Dvurechenski static int sca_switch_to_extended(struct kvm *kvm)
32905e044315SEugene (jno) Dvurechenski {
32915e044315SEugene (jno) Dvurechenski 	struct bsca_block *old_sca = kvm->arch.sca;
32925e044315SEugene (jno) Dvurechenski 	struct esca_block *new_sca;
32935e044315SEugene (jno) Dvurechenski 	struct kvm_vcpu *vcpu;
329446808a4cSMarc Zyngier 	unsigned long vcpu_idx;
32955e044315SEugene (jno) Dvurechenski 	u32 scaol, scaoh;
32965e044315SEugene (jno) Dvurechenski 
329729b40f10SJanosch Frank 	if (kvm->arch.use_esca)
329829b40f10SJanosch Frank 		return 0;
329929b40f10SJanosch Frank 
3300c4196218SChristian Borntraeger 	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL_ACCOUNT | __GFP_ZERO);
33015e044315SEugene (jno) Dvurechenski 	if (!new_sca)
33025e044315SEugene (jno) Dvurechenski 		return -ENOMEM;
33035e044315SEugene (jno) Dvurechenski 
33045e044315SEugene (jno) Dvurechenski 	scaoh = (u32)((u64)(new_sca) >> 32);
33055e044315SEugene (jno) Dvurechenski 	scaol = (u32)(u64)(new_sca) & ~0x3fU;
33065e044315SEugene (jno) Dvurechenski 
33075e044315SEugene (jno) Dvurechenski 	kvm_s390_vcpu_block_all(kvm);
33085e044315SEugene (jno) Dvurechenski 	write_lock(&kvm->arch.sca_lock);
33095e044315SEugene (jno) Dvurechenski 
33105e044315SEugene (jno) Dvurechenski 	sca_copy_b_to_e(new_sca, old_sca);
33115e044315SEugene (jno) Dvurechenski 
33125e044315SEugene (jno) Dvurechenski 	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
33135e044315SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaoh = scaoh;
33145e044315SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaol = scaol;
33150c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
33165e044315SEugene (jno) Dvurechenski 	}
33175e044315SEugene (jno) Dvurechenski 	kvm->arch.sca = new_sca;
33185e044315SEugene (jno) Dvurechenski 	kvm->arch.use_esca = 1;
33195e044315SEugene (jno) Dvurechenski 
33205e044315SEugene (jno) Dvurechenski 	write_unlock(&kvm->arch.sca_lock);
33215e044315SEugene (jno) Dvurechenski 	kvm_s390_vcpu_unblock_all(kvm);
33225e044315SEugene (jno) Dvurechenski 
33235e044315SEugene (jno) Dvurechenski 	free_page((unsigned long)old_sca);
33245e044315SEugene (jno) Dvurechenski 
33258335713aSChristian Borntraeger 	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
33268335713aSChristian Borntraeger 		 old_sca, kvm->arch.sca);
33275e044315SEugene (jno) Dvurechenski 	return 0;
33287d43bafcSEugene (jno) Dvurechenski }
3329a6e2f683SEugene (jno) Dvurechenski 
3330a6e2f683SEugene (jno) Dvurechenski static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
3331a6e2f683SEugene (jno) Dvurechenski {
33325e044315SEugene (jno) Dvurechenski 	int rc;
33335e044315SEugene (jno) Dvurechenski 
3334a6940674SDavid Hildenbrand 	if (!kvm_s390_use_sca_entries()) {
3335a6940674SDavid Hildenbrand 		if (id < KVM_MAX_VCPUS)
3336a6940674SDavid Hildenbrand 			return true;
3337a6940674SDavid Hildenbrand 		return false;
3338a6940674SDavid Hildenbrand 	}
33395e044315SEugene (jno) Dvurechenski 	if (id < KVM_S390_BSCA_CPU_SLOTS)
33405e044315SEugene (jno) Dvurechenski 		return true;
334176a6dd72SDavid Hildenbrand 	if (!sclp.has_esca || !sclp.has_64bscao)
33425e044315SEugene (jno) Dvurechenski 		return false;
33435e044315SEugene (jno) Dvurechenski 
33445e044315SEugene (jno) Dvurechenski 	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
33455e044315SEugene (jno) Dvurechenski 
33465e044315SEugene (jno) Dvurechenski 	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
3347a6e2f683SEugene (jno) Dvurechenski }
3348a6e2f683SEugene (jno) Dvurechenski 
3349db0758b2SDavid Hildenbrand /* needs preemption disabled to protect from TOD sync and vcpu_load/put */
3350db0758b2SDavid Hildenbrand static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3351db0758b2SDavid Hildenbrand {
3352db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
33539c23a131SDavid Hildenbrand 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3354db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_start = get_tod_clock_fast();
33559c23a131SDavid Hildenbrand 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3356db0758b2SDavid Hildenbrand }
3357db0758b2SDavid Hildenbrand 
3358db0758b2SDavid Hildenbrand /* needs preemption disabled to protect from TOD sync and vcpu_load/put */
3359db0758b2SDavid Hildenbrand static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3360db0758b2SDavid Hildenbrand {
3361db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
33629c23a131SDavid Hildenbrand 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3363db0758b2SDavid Hildenbrand 	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3364db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_start = 0;
33659c23a131SDavid Hildenbrand 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3366db0758b2SDavid Hildenbrand }
3367db0758b2SDavid Hildenbrand 
3368db0758b2SDavid Hildenbrand /* needs preemption disabled to protect from TOD sync and vcpu_load/put */
3369db0758b2SDavid Hildenbrand static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3370db0758b2SDavid Hildenbrand {
3371db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
3372db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_enabled = true;
3373db0758b2SDavid Hildenbrand 	__start_cpu_timer_accounting(vcpu);
3374db0758b2SDavid Hildenbrand }
3375db0758b2SDavid Hildenbrand 
3376db0758b2SDavid Hildenbrand /* needs preemption disabled to protect from TOD sync and vcpu_load/put */
3377db0758b2SDavid Hildenbrand static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3378db0758b2SDavid Hildenbrand {
3379db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
3380db0758b2SDavid Hildenbrand 	__stop_cpu_timer_accounting(vcpu);
3381db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_enabled = false;
3382db0758b2SDavid Hildenbrand }
3383db0758b2SDavid Hildenbrand 
3384db0758b2SDavid Hildenbrand static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3385db0758b2SDavid Hildenbrand {
3386db0758b2SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3387db0758b2SDavid Hildenbrand 	__enable_cpu_timer_accounting(vcpu);
3388db0758b2SDavid Hildenbrand 	preempt_enable();
3389db0758b2SDavid Hildenbrand }
3390db0758b2SDavid Hildenbrand 
3391db0758b2SDavid Hildenbrand static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3392db0758b2SDavid Hildenbrand {
3393db0758b2SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3394db0758b2SDavid Hildenbrand 	__disable_cpu_timer_accounting(vcpu);
3395db0758b2SDavid Hildenbrand 	preempt_enable();
3396db0758b2SDavid Hildenbrand }
3397db0758b2SDavid Hildenbrand 
33984287f247SDavid Hildenbrand /* set the cpu timer - may only be called from the VCPU thread itself */
33994287f247SDavid Hildenbrand void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
34004287f247SDavid Hildenbrand {
3401db0758b2SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
34029c23a131SDavid Hildenbrand 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3403db0758b2SDavid Hildenbrand 	if (vcpu->arch.cputm_enabled)
3404db0758b2SDavid Hildenbrand 		vcpu->arch.cputm_start = get_tod_clock_fast();
34054287f247SDavid Hildenbrand 	vcpu->arch.sie_block->cputm = cputm;
34069c23a131SDavid Hildenbrand 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3407db0758b2SDavid Hildenbrand 	preempt_enable();
34084287f247SDavid Hildenbrand }
34094287f247SDavid Hildenbrand 
3410db0758b2SDavid Hildenbrand /* update and get the cpu timer - can also be called from other VCPU threads */
34114287f247SDavid Hildenbrand __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
34124287f247SDavid Hildenbrand {
34139c23a131SDavid Hildenbrand 	unsigned int seq;
3414db0758b2SDavid Hildenbrand 	__u64 value;
3415db0758b2SDavid Hildenbrand 
3416db0758b2SDavid Hildenbrand 	if (unlikely(!vcpu->arch.cputm_enabled))
34174287f247SDavid Hildenbrand 		return vcpu->arch.sie_block->cputm;
3418db0758b2SDavid Hildenbrand 
34199c23a131SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
34209c23a131SDavid Hildenbrand 	do {
34219c23a131SDavid Hildenbrand 		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
34229c23a131SDavid Hildenbrand 		/*
34239c23a131SDavid Hildenbrand 		 * If the writer were ever to execute a read in its own critical
34249c23a131SDavid Hildenbrand 		 * section, e.g. in irq context, we would deadlock.
34259c23a131SDavid Hildenbrand 		 */
34269c23a131SDavid Hildenbrand 		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
3427db0758b2SDavid Hildenbrand 		value = vcpu->arch.sie_block->cputm;
34289c23a131SDavid Hildenbrand 		/* if cputm_start is 0, accounting is being started/stopped */
34299c23a131SDavid Hildenbrand 		if (likely(vcpu->arch.cputm_start))
3430db0758b2SDavid Hildenbrand 			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
34319c23a131SDavid Hildenbrand 	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
34329c23a131SDavid Hildenbrand 	preempt_enable();
3433db0758b2SDavid Hildenbrand 	return value;
34344287f247SDavid Hildenbrand }
34354287f247SDavid Hildenbrand 
3436b0c632dbSHeiko Carstens void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3437b0c632dbSHeiko Carstens {
34389977e886SHendrik Brueckner 
343937d9df98SDavid Hildenbrand 	gmap_enable(vcpu->arch.enabled_gmap);
3440ef8f4f49SDavid Hildenbrand 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
34415ebda316SDavid Hildenbrand 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3442db0758b2SDavid Hildenbrand 		__start_cpu_timer_accounting(vcpu);
344301a745acSDavid Hildenbrand 	vcpu->cpu = cpu;
3444b0c632dbSHeiko Carstens }
3445b0c632dbSHeiko Carstens 
3446b0c632dbSHeiko Carstens void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
3447b0c632dbSHeiko Carstens {
344801a745acSDavid Hildenbrand 	vcpu->cpu = -1;
34495ebda316SDavid Hildenbrand 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3450db0758b2SDavid Hildenbrand 		__stop_cpu_timer_accounting(vcpu);
34519daecfc6SDavid Hildenbrand 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
345237d9df98SDavid Hildenbrand 	vcpu->arch.enabled_gmap = gmap_get_enabled();
345337d9df98SDavid Hildenbrand 	gmap_disable(vcpu->arch.enabled_gmap);
34549977e886SHendrik Brueckner 
3455b0c632dbSHeiko Carstens }
3456b0c632dbSHeiko Carstens 
345731928aa5SDominik Dingel void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
345842897d86SMarcelo Tosatti {
345972f25020SJason J. Herne 	mutex_lock(&vcpu->kvm->lock);
3460fdf03650SFan Zhang 	preempt_disable();
346172f25020SJason J. Herne 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
3462d16b52cbSDavid Hildenbrand 	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
3463fdf03650SFan Zhang 	preempt_enable();
346472f25020SJason J. Herne 	mutex_unlock(&vcpu->kvm->lock);
346525508824SDavid Hildenbrand 	if (!kvm_is_ucontrol(vcpu->kvm)) {
3466dafd032aSDominik Dingel 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
3467eaa78f34SDavid Hildenbrand 		sca_add_vcpu(vcpu);
346825508824SDavid Hildenbrand 	}
34696502a34cSDavid Hildenbrand 	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
34706502a34cSDavid Hildenbrand 		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
347137d9df98SDavid Hildenbrand 	/* make vcpu_load load the right gmap on the first trigger */
347237d9df98SDavid Hildenbrand 	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
347342897d86SMarcelo Tosatti }
347442897d86SMarcelo Tosatti 
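/*
 * A PCKMO subfunction is usable by the guest only if it is offered both
 * by the configured CPU model and by the host itself.
 */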
34758ec2fa52SChristian Borntraeger static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
34768ec2fa52SChristian Borntraeger {
34778ec2fa52SChristian Borntraeger 	if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
34788ec2fa52SChristian Borntraeger 	    test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
34798ec2fa52SChristian Borntraeger 		return true;
34808ec2fa52SChristian Borntraeger 	return false;
34818ec2fa52SChristian Borntraeger }
34828ec2fa52SChristian Borntraeger 
34838ec2fa52SChristian Borntraeger static bool kvm_has_pckmo_ecc(struct kvm *kvm)
34848ec2fa52SChristian Borntraeger {
34858ec2fa52SChristian Borntraeger 	/* At least one ECC subfunction must be present */
34868ec2fa52SChristian Borntraeger 	return kvm_has_pckmo_subfunc(kvm, 32) ||
34878ec2fa52SChristian Borntraeger 	       kvm_has_pckmo_subfunc(kvm, 33) ||
34888ec2fa52SChristian Borntraeger 	       kvm_has_pckmo_subfunc(kvm, 34) ||
34898ec2fa52SChristian Borntraeger 	       kvm_has_pckmo_subfunc(kvm, 40) ||
34908ec2fa52SChristian Borntraeger 	       kvm_has_pckmo_subfunc(kvm, 41);
34918ec2fa52SChristian Borntraeger 
34928ec2fa52SChristian Borntraeger }
34938ec2fa52SChristian Borntraeger 
34945102ee87STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
34955102ee87STony Krowiak {
3496e585b24aSTony Krowiak 	/*
3497e585b24aSTony Krowiak 	 * If the AP instructions are not being interpreted and the MSAX3
3498e585b24aSTony Krowiak 	 * facility is not configured for the guest, there is nothing to set up.
3499e585b24aSTony Krowiak 	 */
3500e585b24aSTony Krowiak 	if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
35015102ee87STony Krowiak 		return;
35025102ee87STony Krowiak 
3503e585b24aSTony Krowiak 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
3504a374e892STony Krowiak 	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
350537940fb0STony Krowiak 	vcpu->arch.sie_block->eca &= ~ECA_APIE;
35068ec2fa52SChristian Borntraeger 	vcpu->arch.sie_block->ecd &= ~ECD_ECC;
3507a374e892STony Krowiak 
3508e585b24aSTony Krowiak 	if (vcpu->kvm->arch.crypto.apie)
3509e585b24aSTony Krowiak 		vcpu->arch.sie_block->eca |= ECA_APIE;
3510e585b24aSTony Krowiak 
3511e585b24aSTony Krowiak 	/* Set up protected key support */
35128ec2fa52SChristian Borntraeger 	if (vcpu->kvm->arch.crypto.aes_kw) {
3513a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
35148ec2fa52SChristian Borntraeger 		/* ECC is also wrapped with the AES key */
35158ec2fa52SChristian Borntraeger 		if (kvm_has_pckmo_ecc(vcpu->kvm))
35168ec2fa52SChristian Borntraeger 			vcpu->arch.sie_block->ecd |= ECD_ECC;
35178ec2fa52SChristian Borntraeger 	}
35188ec2fa52SChristian Borntraeger 
3519a374e892STony Krowiak 	if (vcpu->kvm->arch.crypto.dea_kw)
3520a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
35215102ee87STony Krowiak }
35225102ee87STony Krowiak 
3523b31605c1SDominik Dingel void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
3524b31605c1SDominik Dingel {
3525b31605c1SDominik Dingel 	free_page(vcpu->arch.sie_block->cbrlo);
3526b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = 0;
3527b31605c1SDominik Dingel }
3528b31605c1SDominik Dingel 
3529b31605c1SDominik Dingel int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
3530b31605c1SDominik Dingel {
3531c4196218SChristian Borntraeger 	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL_ACCOUNT);
3532b31605c1SDominik Dingel 	if (!vcpu->arch.sie_block->cbrlo)
3533b31605c1SDominik Dingel 		return -ENOMEM;
3534b31605c1SDominik Dingel 	return 0;
3535b31605c1SDominik Dingel }
3536b31605c1SDominik Dingel 
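/* Propagate the VM's CPU model (IBC and facility list) into the SIE block. */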
353791520f1aSMichael Mueller static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
353891520f1aSMichael Mueller {
353991520f1aSMichael Mueller 	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
354091520f1aSMichael Mueller 
354191520f1aSMichael Mueller 	vcpu->arch.sie_block->ibc = model->ibc;
354280bc79dcSDavid Hildenbrand 	if (test_kvm_facility(vcpu->kvm, 7))
3543c54f0d6aSDavid Hildenbrand 		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
354491520f1aSMichael Mueller }
354591520f1aSMichael Mueller 
3546ff72bb55SSean Christopherson static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
3547ff72bb55SSean Christopherson {
3548b31605c1SDominik Dingel 	int rc = 0;
354929b40f10SJanosch Frank 	u16 uvrc, uvrrc;
3550b31288faSKonstantin Weitz 
35519e6dabefSCornelia Huck 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
35529e6dabefSCornelia Huck 						    CPUSTAT_SM |
3553a4a4f191SGuenther Hutzl 						    CPUSTAT_STOPPED);
3554a4a4f191SGuenther Hutzl 
355553df84f8SGuenther Hutzl 	if (test_kvm_facility(vcpu->kvm, 78))
3556ef8f4f49SDavid Hildenbrand 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
355753df84f8SGuenther Hutzl 	else if (test_kvm_facility(vcpu->kvm, 8))
3558ef8f4f49SDavid Hildenbrand 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
3559a4a4f191SGuenther Hutzl 
356091520f1aSMichael Mueller 	kvm_s390_vcpu_setup_model(vcpu);
356191520f1aSMichael Mueller 
3562bdab09f3SDavid Hildenbrand 	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
3563bdab09f3SDavid Hildenbrand 	if (MACHINE_HAS_ESOP)
35640c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
3565bd50e8ecSDavid Hildenbrand 	if (test_kvm_facility(vcpu->kvm, 9))
35660c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecb |= ECB_SRSI;
3567f597d24eSDavid Hildenbrand 	if (test_kvm_facility(vcpu->kvm, 73))
35680c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecb |= ECB_TE;
35697119decfSJanis Schoetterl-Glausch 	if (!kvm_is_ucontrol(vcpu->kvm))
35707119decfSJanis Schoetterl-Glausch 		vcpu->arch.sie_block->ecb |= ECB_SPECI;
35717feb6bb8SMichael Mueller 
3572c9f0a2b8SJanosch Frank 	if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
35730c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
3574cd1836f5SJanosch Frank 	if (test_kvm_facility(vcpu->kvm, 130))
35750c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
35760c9d8683SDavid Hildenbrand 	vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
357748ee7d3aSDavid Hildenbrand 	if (sclp.has_cei)
35780c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= ECA_CEI;
357911ad65b7SDavid Hildenbrand 	if (sclp.has_ib)
35800c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= ECA_IB;
358137c5f6c8SDavid Hildenbrand 	if (sclp.has_siif)
35820c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= ECA_SII;
358337c5f6c8SDavid Hildenbrand 	if (sclp.has_sigpif)
35840c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= ECA_SIGPI;
358518280d8bSMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 129)) {
35860c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= ECA_VX;
35870c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
358813211ea7SEric Farman 	}
35898fa1696eSCollin L. Walling 	if (test_kvm_facility(vcpu->kvm, 139))
35908fa1696eSCollin L. Walling 		vcpu->arch.sie_block->ecd |= ECD_MEF;
3591a3da7b4aSChristian Borntraeger 	if (test_kvm_facility(vcpu->kvm, 156))
3592a3da7b4aSChristian Borntraeger 		vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
3593d7c5cb01SMichael Mueller 	if (vcpu->arch.sie_block->gd) {
3594d7c5cb01SMichael Mueller 		vcpu->arch.sie_block->eca |= ECA_AIV;
3595d7c5cb01SMichael Mueller 		VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3596d7c5cb01SMichael Mueller 			   vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3597d7c5cb01SMichael Mueller 	}
35984e0b1ab7SFan Zhang 	vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
35994e0b1ab7SFan Zhang 					| SDNXC;
3600c6e5f166SFan Zhang 	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
3601730cd632SFarhan Ali 
3602730cd632SFarhan Ali 	if (sclp.has_kss)
3603ef8f4f49SDavid Hildenbrand 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
3604730cd632SFarhan Ali 	else
3605492d8642SThomas Huth 		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
36065a5e6536SMatthew Rosato 
3607e6db1d61SDominik Dingel 	if (vcpu->kvm->arch.use_cmma) {
3608b31605c1SDominik Dingel 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
3609b31605c1SDominik Dingel 		if (rc)
3610b31605c1SDominik Dingel 			return rc;
3611b31288faSKonstantin Weitz 	}
36120ac96cafSDavid Hildenbrand 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3613ca872302SChristian Borntraeger 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
36149d8d5786SMichael Mueller 
361567d49d52SCollin Walling 	vcpu->arch.sie_block->hpid = HPID_KVM;
361667d49d52SCollin Walling 
36175102ee87STony Krowiak 	kvm_s390_vcpu_crypto_setup(vcpu);
36185102ee87STony Krowiak 
36193f4bbb43SMatthew Rosato 	kvm_s390_vcpu_pci_setup(vcpu);
36203f4bbb43SMatthew Rosato 
362129b40f10SJanosch Frank 	mutex_lock(&vcpu->kvm->lock);
362229b40f10SJanosch Frank 	if (kvm_s390_pv_is_protected(vcpu->kvm)) {
362329b40f10SJanosch Frank 		rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
362429b40f10SJanosch Frank 		if (rc)
362529b40f10SJanosch Frank 			kvm_s390_vcpu_unsetup_cmma(vcpu);
362629b40f10SJanosch Frank 	}
362729b40f10SJanosch Frank 	mutex_unlock(&vcpu->kvm->lock);
362829b40f10SJanosch Frank 
3629b31605c1SDominik Dingel 	return rc;
3630b0c632dbSHeiko Carstens }
3631b0c632dbSHeiko Carstens 
3632897cc38eSSean Christopherson int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
3633897cc38eSSean Christopherson {
3634897cc38eSSean Christopherson 	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
3635897cc38eSSean Christopherson 		return -EINVAL;
3636897cc38eSSean Christopherson 	return 0;
3637897cc38eSSean Christopherson }
3638897cc38eSSean Christopherson 
3639e529ef66SSean Christopherson int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
3640b0c632dbSHeiko Carstens {
36417feb6bb8SMichael Mueller 	struct sie_page *sie_page;
3642897cc38eSSean Christopherson 	int rc;
36434d47555aSCarsten Otte 
3644da72ca4dSQingFeng Hao 	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
3645c4196218SChristian Borntraeger 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT);
36467feb6bb8SMichael Mueller 	if (!sie_page)
3647e529ef66SSean Christopherson 		return -ENOMEM;
3648b0c632dbSHeiko Carstens 
36497feb6bb8SMichael Mueller 	vcpu->arch.sie_block = &sie_page->sie_block;
36507feb6bb8SMichael Mueller 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
36517feb6bb8SMichael Mueller 
3652efed1104SDavid Hildenbrand 	/* the real guest size will always be smaller than msl */
3653efed1104SDavid Hildenbrand 	vcpu->arch.sie_block->mso = 0;
3654efed1104SDavid Hildenbrand 	vcpu->arch.sie_block->msl = sclp.hamax;
3655efed1104SDavid Hildenbrand 
3656e529ef66SSean Christopherson 	vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
3657ba5c1e9bSCarsten Otte 	spin_lock_init(&vcpu->arch.local_int.lock);
3658ee6a569dSMichael Mueller 	vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm);
36599c23a131SDavid Hildenbrand 	seqcount_init(&vcpu->arch.cputm_seqcount);
3660ba5c1e9bSCarsten Otte 
3661321f8ee5SSean Christopherson 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3662321f8ee5SSean Christopherson 	kvm_clear_async_pf_completion_queue(vcpu);
3663321f8ee5SSean Christopherson 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
3664321f8ee5SSean Christopherson 				    KVM_SYNC_GPRS |
3665321f8ee5SSean Christopherson 				    KVM_SYNC_ACRS |
3666321f8ee5SSean Christopherson 				    KVM_SYNC_CRS |
3667321f8ee5SSean Christopherson 				    KVM_SYNC_ARCH0 |
366823a60f83SCollin Walling 				    KVM_SYNC_PFAULT |
366923a60f83SCollin Walling 				    KVM_SYNC_DIAG318;
3670321f8ee5SSean Christopherson 	kvm_s390_set_prefix(vcpu, 0);
3671321f8ee5SSean Christopherson 	if (test_kvm_facility(vcpu->kvm, 64))
3672321f8ee5SSean Christopherson 		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
3673321f8ee5SSean Christopherson 	if (test_kvm_facility(vcpu->kvm, 82))
3674321f8ee5SSean Christopherson 		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
3675321f8ee5SSean Christopherson 	if (test_kvm_facility(vcpu->kvm, 133))
3676321f8ee5SSean Christopherson 		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
3677321f8ee5SSean Christopherson 	if (test_kvm_facility(vcpu->kvm, 156))
3678321f8ee5SSean Christopherson 		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
3679321f8ee5SSean Christopherson 	/* fprs can be synchronized via vrs, even if the guest has no vx. With
3680321f8ee5SSean Christopherson 	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
3681321f8ee5SSean Christopherson 	 */
3682321f8ee5SSean Christopherson 	if (MACHINE_HAS_VX)
3683321f8ee5SSean Christopherson 		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
3684321f8ee5SSean Christopherson 	else
3685321f8ee5SSean Christopherson 		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
3686321f8ee5SSean Christopherson 
3687321f8ee5SSean Christopherson 	if (kvm_is_ucontrol(vcpu->kvm)) {
3688321f8ee5SSean Christopherson 		rc = __kvm_ucontrol_vcpu_init(vcpu);
3689321f8ee5SSean Christopherson 		if (rc)
3690a2017f17SSean Christopherson 			goto out_free_sie_block;
3691321f8ee5SSean Christopherson 	}
3692321f8ee5SSean Christopherson 
3693e529ef66SSean Christopherson 	VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
3694e529ef66SSean Christopherson 		 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3695e529ef66SSean Christopherson 	trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3696b0c632dbSHeiko Carstens 
3697ff72bb55SSean Christopherson 	rc = kvm_s390_vcpu_setup(vcpu);
3698ff72bb55SSean Christopherson 	if (rc)
3699ff72bb55SSean Christopherson 		goto out_ucontrol_uninit;
3700e529ef66SSean Christopherson 	return 0;
3701e529ef66SSean Christopherson 
3702ff72bb55SSean Christopherson out_ucontrol_uninit:
3703ff72bb55SSean Christopherson 	if (kvm_is_ucontrol(vcpu->kvm))
3704ff72bb55SSean Christopherson 		gmap_remove(vcpu->arch.gmap);
37057b06bf2fSWei Yongjun out_free_sie_block:
37067b06bf2fSWei Yongjun 	free_page((unsigned long)(vcpu->arch.sie_block));
3707e529ef66SSean Christopherson 	return rc;
3708b0c632dbSHeiko Carstens }
3709b0c632dbSHeiko Carstens 
3710b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3711b0c632dbSHeiko Carstens {
37129b57e9d5SHalil Pasic 	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
37139a022067SDavid Hildenbrand 	return kvm_s390_vcpu_has_irq(vcpu, 0);
3714b0c632dbSHeiko Carstens }
3715b0c632dbSHeiko Carstens 
3716199b5763SLongpeng(Mike) bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3717199b5763SLongpeng(Mike) {
37180546c63dSLongpeng(Mike) 	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
3719199b5763SLongpeng(Mike) }
3720199b5763SLongpeng(Mike) 
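/*
 * Blocking sets PROG_BLOCK_SIE in prog20 and kicks the VCPU out of SIE;
 * the bit keeps it from re-entering until kvm_s390_vcpu_unblock() clears
 * it again.
 */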
372127406cd5SChristian Borntraeger void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
372249b99e1eSChristian Borntraeger {
3723805de8f4SPeter Zijlstra 	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
372461a6df54SDavid Hildenbrand 	exit_sie(vcpu);
372549b99e1eSChristian Borntraeger }
372649b99e1eSChristian Borntraeger 
372727406cd5SChristian Borntraeger void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
372849b99e1eSChristian Borntraeger {
3729805de8f4SPeter Zijlstra 	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
373049b99e1eSChristian Borntraeger }
373149b99e1eSChristian Borntraeger 
37328e236546SChristian Borntraeger static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
37338e236546SChristian Borntraeger {
3734805de8f4SPeter Zijlstra 	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
373561a6df54SDavid Hildenbrand 	exit_sie(vcpu);
37368e236546SChristian Borntraeger }
37378e236546SChristian Borntraeger 
37389ea59728SDavid Hildenbrand bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
37399ea59728SDavid Hildenbrand {
37409ea59728SDavid Hildenbrand 	return atomic_read(&vcpu->arch.sie_block->prog20) &
37419ea59728SDavid Hildenbrand 	       (PROG_BLOCK_SIE | PROG_REQUEST);
37429ea59728SDavid Hildenbrand }
37439ea59728SDavid Hildenbrand 
37448e236546SChristian Borntraeger static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
37458e236546SChristian Borntraeger {
37469bf9fde2SJason J. Herne 	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
37478e236546SChristian Borntraeger }
37488e236546SChristian Borntraeger 
374949b99e1eSChristian Borntraeger /*
37509ea59728SDavid Hildenbrand  * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
375149b99e1eSChristian Borntraeger  * If the CPU is not running (e.g. waiting as idle), the function will
375249b99e1eSChristian Borntraeger  * return immediately. */
375349b99e1eSChristian Borntraeger void exit_sie(struct kvm_vcpu *vcpu)
375449b99e1eSChristian Borntraeger {
3755ef8f4f49SDavid Hildenbrand 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
37569ea59728SDavid Hildenbrand 	kvm_s390_vsie_kick(vcpu);
375749b99e1eSChristian Borntraeger 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
375849b99e1eSChristian Borntraeger 		cpu_relax();
375949b99e1eSChristian Borntraeger }
376049b99e1eSChristian Borntraeger 
37618e236546SChristian Borntraeger /* Kick a guest cpu out of SIE to process a request synchronously */
37628e236546SChristian Borntraeger void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
376349b99e1eSChristian Borntraeger {
3764df06dae3SSean Christopherson 	__kvm_make_request(req, vcpu);
37658e236546SChristian Borntraeger 	kvm_s390_vcpu_request(vcpu);
376649b99e1eSChristian Borntraeger }
376749b99e1eSChristian Borntraeger 
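/*
 * gmap invalidation notifier: if an invalidated range overlaps one of a
 * VCPU's two prefix pages, request a prefix refresh for that VCPU.
 */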
3768414d3b07SMartin Schwidefsky static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
3769414d3b07SMartin Schwidefsky 			      unsigned long end)
37702c70fe44SChristian Borntraeger {
37712c70fe44SChristian Borntraeger 	struct kvm *kvm = gmap->private;
37722c70fe44SChristian Borntraeger 	struct kvm_vcpu *vcpu;
3773414d3b07SMartin Schwidefsky 	unsigned long prefix;
377446808a4cSMarc Zyngier 	unsigned long i;
37752c70fe44SChristian Borntraeger 
377665d0b0d4SDavid Hildenbrand 	if (gmap_is_shadow(gmap))
377765d0b0d4SDavid Hildenbrand 		return;
3778414d3b07SMartin Schwidefsky 	if (start >= 1UL << 31)
3779414d3b07SMartin Schwidefsky 		/* We are only interested in prefix pages */
3780414d3b07SMartin Schwidefsky 		return;
37812c70fe44SChristian Borntraeger 	kvm_for_each_vcpu(i, vcpu, kvm) {
37822c70fe44SChristian Borntraeger 		/* match against both prefix pages */
3783414d3b07SMartin Schwidefsky 		prefix = kvm_s390_get_prefix(vcpu);
3784414d3b07SMartin Schwidefsky 		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
3785414d3b07SMartin Schwidefsky 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
3786414d3b07SMartin Schwidefsky 				   start, end);
3787cc65c3a1SSean Christopherson 			kvm_s390_sync_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
37882c70fe44SChristian Borntraeger 		}
37892c70fe44SChristian Borntraeger 	}
37902c70fe44SChristian Borntraeger }
37912c70fe44SChristian Borntraeger 
37928b905d28SChristian Borntraeger bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
37938b905d28SChristian Borntraeger {
37948b905d28SChristian Borntraeger 	/* do not poll with more than halt_poll_max_steal percent of steal time */
37958b905d28SChristian Borntraeger 	if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
37966f390916SSean Christopherson 	    READ_ONCE(halt_poll_max_steal)) {
37978b905d28SChristian Borntraeger 		vcpu->stat.halt_no_poll_steal++;
37988b905d28SChristian Borntraeger 		return true;
37998b905d28SChristian Borntraeger 	}
38008b905d28SChristian Borntraeger 	return false;
38018b905d28SChristian Borntraeger }
38028b905d28SChristian Borntraeger 
3803b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
3804b6d33834SChristoffer Dall {
3805b6d33834SChristoffer Dall 	/* kvm common code refers to this, but never calls it */
3806b6d33834SChristoffer Dall 	BUG();
3807b6d33834SChristoffer Dall 	return 0;
3808b6d33834SChristoffer Dall }
3809b6d33834SChristoffer Dall 
381014eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
381114eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
381214eebd91SCarsten Otte {
381314eebd91SCarsten Otte 	int r = -EINVAL;
381414eebd91SCarsten Otte 
381514eebd91SCarsten Otte 	switch (reg->id) {
381629b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
381729b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->todpr,
381829b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
381929b7c71bSCarsten Otte 		break;
382029b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
382129b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->epoch,
382229b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
382329b7c71bSCarsten Otte 		break;
382446a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
38254287f247SDavid Hildenbrand 		r = put_user(kvm_s390_get_cpu_timer(vcpu),
382646a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
382746a6dd1cSJason J. herne 		break;
382846a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
382946a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->ckc,
383046a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
383146a6dd1cSJason J. herne 		break;
3832536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
3833536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_token,
3834536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
3835536336c2SDominik Dingel 		break;
3836536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
3837536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_compare,
3838536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
3839536336c2SDominik Dingel 		break;
3840536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
3841536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_select,
3842536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
3843536336c2SDominik Dingel 		break;
3844672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
3845672550fbSChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->pp,
3846672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
3847672550fbSChristian Borntraeger 		break;
3848afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
3849afa45ff5SChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->gbea,
3850afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
3851afa45ff5SChristian Borntraeger 		break;
385214eebd91SCarsten Otte 	default:
385314eebd91SCarsten Otte 		break;
385414eebd91SCarsten Otte 	}
385514eebd91SCarsten Otte 
385614eebd91SCarsten Otte 	return r;
385714eebd91SCarsten Otte }
385814eebd91SCarsten Otte 
385914eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
386014eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
386114eebd91SCarsten Otte {
386214eebd91SCarsten Otte 	int r = -EINVAL;
38634287f247SDavid Hildenbrand 	__u64 val;
386414eebd91SCarsten Otte 
386514eebd91SCarsten Otte 	switch (reg->id) {
386629b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
386729b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->todpr,
386829b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
386929b7c71bSCarsten Otte 		break;
387029b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
387129b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->epoch,
387229b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
387329b7c71bSCarsten Otte 		break;
387446a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
38754287f247SDavid Hildenbrand 		r = get_user(val, (u64 __user *)reg->addr);
38764287f247SDavid Hildenbrand 		if (!r)
38774287f247SDavid Hildenbrand 			kvm_s390_set_cpu_timer(vcpu, val);
387846a6dd1cSJason J. herne 		break;
387946a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
388046a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->ckc,
388146a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
388246a6dd1cSJason J. herne 		break;
3883536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
3884536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_token,
3885536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
38869fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
38879fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
3888536336c2SDominik Dingel 		break;
3889536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
3890536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_compare,
3891536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
3892536336c2SDominik Dingel 		break;
3893536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
3894536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_select,
3895536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
3896536336c2SDominik Dingel 		break;
3897672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
3898672550fbSChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->pp,
3899672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
3900672550fbSChristian Borntraeger 		break;
3901afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
3902afa45ff5SChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->gbea,
3903afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
3904afa45ff5SChristian Borntraeger 		break;
390514eebd91SCarsten Otte 	default:
390614eebd91SCarsten Otte 		break;
390714eebd91SCarsten Otte 	}
390814eebd91SCarsten Otte 
390914eebd91SCarsten Otte 	return r;
391014eebd91SCarsten Otte }
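
/*
 * Illustrative sketch, not part of the original source: from userspace,
 * the registers handled above are accessed through the generic
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls ("vcpu_fd" is a
 * hypothetical open vcpu file descriptor, error handling omitted):
 *
 *	__u64 cputm;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&cputm,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */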
3911b6d33834SChristoffer Dall 
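/*
 * The reset handlers below build on one another: clear reset is a
 * superset of initial reset, which in turn is a superset of normal
 * reset, mirroring the architected CPU reset hierarchy.
 */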
39127de3f142SJanosch Frank static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
3913b0c632dbSHeiko Carstens {
39147de3f142SJanosch Frank 	vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
39157de3f142SJanosch Frank 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
39167de3f142SJanosch Frank 	memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
39177de3f142SJanosch Frank 
39187de3f142SJanosch Frank 	kvm_clear_async_pf_completion_queue(vcpu);
39197de3f142SJanosch Frank 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
39207de3f142SJanosch Frank 		kvm_s390_vcpu_stop(vcpu);
39217de3f142SJanosch Frank 	kvm_s390_clear_local_irqs(vcpu);
39227de3f142SJanosch Frank }
39237de3f142SJanosch Frank 
39247de3f142SJanosch Frank static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
39257de3f142SJanosch Frank {
39267de3f142SJanosch Frank 	/* Initial reset is a superset of the normal reset */
39277de3f142SJanosch Frank 	kvm_arch_vcpu_ioctl_normal_reset(vcpu);
39287de3f142SJanosch Frank 
3929e93fc7b4SChristian Borntraeger 	/*
3930e93fc7b4SChristian Borntraeger 	 * This equals the initial CPU reset in the PoP, but we don't switch to ESA.
3931e93fc7b4SChristian Borntraeger 	 * We not only reset the internal data, but also ...
3932e93fc7b4SChristian Borntraeger 	 */
39337de3f142SJanosch Frank 	vcpu->arch.sie_block->gpsw.mask = 0;
39347de3f142SJanosch Frank 	vcpu->arch.sie_block->gpsw.addr = 0;
39357de3f142SJanosch Frank 	kvm_s390_set_prefix(vcpu, 0);
39367de3f142SJanosch Frank 	kvm_s390_set_cpu_timer(vcpu, 0);
39377de3f142SJanosch Frank 	vcpu->arch.sie_block->ckc = 0;
39387de3f142SJanosch Frank 	memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
39397de3f142SJanosch Frank 	vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
39407de3f142SJanosch Frank 	vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
3941e93fc7b4SChristian Borntraeger 
3942e93fc7b4SChristian Borntraeger 	/* ... the data in sync regs */
3943e93fc7b4SChristian Borntraeger 	memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
3944e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.ckc = 0;
3945e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
3946e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
3947e93fc7b4SChristian Borntraeger 	vcpu->run->psw_addr = 0;
3948e93fc7b4SChristian Borntraeger 	vcpu->run->psw_mask = 0;
3949e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.todpr = 0;
3950e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.cputm = 0;
3951e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.ckc = 0;
3952e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.pp = 0;
3953e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.gbea = 1;
39547de3f142SJanosch Frank 	vcpu->run->s.regs.fpc = 0;
39550f303504SJanosch Frank 	/*
39560f303504SJanosch Frank 	 * Do not reset these registers in the protected case, as some of
39570f303504SJanosch Frank 	 * them are overlaid and they are not accessible in this case
39580f303504SJanosch Frank 	 * anyway.
39590f303504SJanosch Frank 	 */
39600f303504SJanosch Frank 	if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
39617de3f142SJanosch Frank 		vcpu->arch.sie_block->gbea = 1;
39627de3f142SJanosch Frank 		vcpu->arch.sie_block->pp = 0;
39637de3f142SJanosch Frank 		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
39640f303504SJanosch Frank 		vcpu->arch.sie_block->todpr = 0;
39650f303504SJanosch Frank 	}
39667de3f142SJanosch Frank }
39677de3f142SJanosch Frank 
39687de3f142SJanosch Frank static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
39697de3f142SJanosch Frank {
39707de3f142SJanosch Frank 	struct kvm_sync_regs *regs = &vcpu->run->s.regs;
39717de3f142SJanosch Frank 
39727de3f142SJanosch Frank 	/* Clear reset is a superset of the initial reset */
39737de3f142SJanosch Frank 	kvm_arch_vcpu_ioctl_initial_reset(vcpu);
39747de3f142SJanosch Frank 
39757de3f142SJanosch Frank 	memset(&regs->gprs, 0, sizeof(regs->gprs));
39767de3f142SJanosch Frank 	memset(&regs->vrs, 0, sizeof(regs->vrs));
39777de3f142SJanosch Frank 	memset(&regs->acrs, 0, sizeof(regs->acrs));
39787de3f142SJanosch Frank 	memset(&regs->gscb, 0, sizeof(regs->gscb));
39797de3f142SJanosch Frank 
39807de3f142SJanosch Frank 	regs->etoken = 0;
39817de3f142SJanosch Frank 	regs->etoken_extension = 0;
3982b0c632dbSHeiko Carstens }
3983b0c632dbSHeiko Carstens 
3984b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3985b0c632dbSHeiko Carstens {
3986875656feSChristoffer Dall 	vcpu_load(vcpu);
39875a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
3988875656feSChristoffer Dall 	vcpu_put(vcpu);
3989b0c632dbSHeiko Carstens 	return 0;
3990b0c632dbSHeiko Carstens }
3991b0c632dbSHeiko Carstens 
3992b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3993b0c632dbSHeiko Carstens {
39941fc9b76bSChristoffer Dall 	vcpu_load(vcpu);
39955a32c1afSChristian Borntraeger 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
39961fc9b76bSChristoffer Dall 	vcpu_put(vcpu);
3997b0c632dbSHeiko Carstens 	return 0;
3998b0c632dbSHeiko Carstens }
3999b0c632dbSHeiko Carstens 
4000b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
4001b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
4002b0c632dbSHeiko Carstens {
4003b4ef9d4eSChristoffer Dall 	vcpu_load(vcpu);
4004b4ef9d4eSChristoffer Dall 
400559674c1aSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
4006b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
4007b4ef9d4eSChristoffer Dall 
4008b4ef9d4eSChristoffer Dall 	vcpu_put(vcpu);
4009b0c632dbSHeiko Carstens 	return 0;
4010b0c632dbSHeiko Carstens }
4011b0c632dbSHeiko Carstens 
4012b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
4013b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
4014b0c632dbSHeiko Carstens {
4015bcdec41cSChristoffer Dall 	vcpu_load(vcpu);
4016bcdec41cSChristoffer Dall 
401759674c1aSChristian Borntraeger 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
4018b0c632dbSHeiko Carstens 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
4019bcdec41cSChristoffer Dall 
4020bcdec41cSChristoffer Dall 	vcpu_put(vcpu);
4021b0c632dbSHeiko Carstens 	return 0;
4022b0c632dbSHeiko Carstens }
4023b0c632dbSHeiko Carstens 
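/*
 * With the vector facility the guest floating point registers live in the
 * leftmost 64 bits of the vector registers, so KVM_SET_FPU/KVM_GET_FPU
 * convert between the fpu layout and the vector register layout; without
 * the facility the fprs are copied as-is.
 */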
4024b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4025b0c632dbSHeiko Carstens {
40266a96bc7fSChristoffer Dall 	int ret = 0;
40276a96bc7fSChristoffer Dall 
40286a96bc7fSChristoffer Dall 	vcpu_load(vcpu);
40296a96bc7fSChristoffer Dall 
40306a96bc7fSChristoffer Dall 	if (test_fp_ctl(fpu->fpc)) {
40316a96bc7fSChristoffer Dall 		ret = -EINVAL;
40326a96bc7fSChristoffer Dall 		goto out;
40336a96bc7fSChristoffer Dall 	}
4034e1788bb9SChristian Borntraeger 	vcpu->run->s.regs.fpc = fpu->fpc;
40359abc2a08SDavid Hildenbrand 	if (MACHINE_HAS_VX)
4036a7d4b8f2SDavid Hildenbrand 		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
4037a7d4b8f2SDavid Hildenbrand 				 (freg_t *) fpu->fprs);
40389abc2a08SDavid Hildenbrand 	else
4039a7d4b8f2SDavid Hildenbrand 		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
40406a96bc7fSChristoffer Dall 
40416a96bc7fSChristoffer Dall out:
40426a96bc7fSChristoffer Dall 	vcpu_put(vcpu);
40436a96bc7fSChristoffer Dall 	return ret;
4044b0c632dbSHeiko Carstens }
4045b0c632dbSHeiko Carstens 
4046b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4047b0c632dbSHeiko Carstens {
40481393123eSChristoffer Dall 	vcpu_load(vcpu);
40491393123eSChristoffer Dall 
40509abc2a08SDavid Hildenbrand 	/* make sure we have the latest values */
40519abc2a08SDavid Hildenbrand 	save_fpu_regs();
40529abc2a08SDavid Hildenbrand 	if (MACHINE_HAS_VX)
4053a7d4b8f2SDavid Hildenbrand 		convert_vx_to_fp((freg_t *) fpu->fprs,
4054a7d4b8f2SDavid Hildenbrand 				 (__vector128 *) vcpu->run->s.regs.vrs);
40559abc2a08SDavid Hildenbrand 	else
4056a7d4b8f2SDavid Hildenbrand 		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
4057e1788bb9SChristian Borntraeger 	fpu->fpc = vcpu->run->s.regs.fpc;
40581393123eSChristoffer Dall 
40591393123eSChristoffer Dall 	vcpu_put(vcpu);
4060b0c632dbSHeiko Carstens 	return 0;
4061b0c632dbSHeiko Carstens }
4062b0c632dbSHeiko Carstens 
4063b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
4064b0c632dbSHeiko Carstens {
4065b0c632dbSHeiko Carstens 	int rc = 0;
4066b0c632dbSHeiko Carstens 
40677a42fdc2SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
4068b0c632dbSHeiko Carstens 		rc = -EBUSY;
4069d7b0b5ebSCarsten Otte 	else {
4070d7b0b5ebSCarsten Otte 		vcpu->run->psw_mask = psw.mask;
4071d7b0b5ebSCarsten Otte 		vcpu->run->psw_addr = psw.addr;
4072d7b0b5ebSCarsten Otte 	}
4073b0c632dbSHeiko Carstens 	return rc;
4074b0c632dbSHeiko Carstens }
4075b0c632dbSHeiko Carstens 
4076b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
4077b0c632dbSHeiko Carstens 				  struct kvm_translation *tr)
4078b0c632dbSHeiko Carstens {
4079b0c632dbSHeiko Carstens 	return -EINVAL; /* not implemented yet */
4080b0c632dbSHeiko Carstens }
4081b0c632dbSHeiko Carstens 
408227291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
408327291e21SDavid Hildenbrand 			      KVM_GUESTDBG_USE_HW_BP | \
408427291e21SDavid Hildenbrand 			      KVM_GUESTDBG_ENABLE)
408527291e21SDavid Hildenbrand 
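/*
 * KVM_SET_GUEST_DEBUG: guest debugging requires the guest-PER-enhancement
 * facility (sclp.has_gpere). Any previously imported breakpoint data is
 * cleared first; enabling debugging then forces PER interception via
 * CPUSTAT_P, while disabling clears that flag again.
 */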
4086d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
4087d0bfb940SJan Kiszka 					struct kvm_guest_debug *dbg)
4088b0c632dbSHeiko Carstens {
408927291e21SDavid Hildenbrand 	int rc = 0;
409027291e21SDavid Hildenbrand 
409166b56562SChristoffer Dall 	vcpu_load(vcpu);
409266b56562SChristoffer Dall 
409327291e21SDavid Hildenbrand 	vcpu->guest_debug = 0;
409427291e21SDavid Hildenbrand 	kvm_s390_clear_bp_data(vcpu);
409527291e21SDavid Hildenbrand 
409666b56562SChristoffer Dall 	if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
409766b56562SChristoffer Dall 		rc = -EINVAL;
409866b56562SChristoffer Dall 		goto out;
409966b56562SChristoffer Dall 	}
410066b56562SChristoffer Dall 	if (!sclp.has_gpere) {
410166b56562SChristoffer Dall 		rc = -EINVAL;
410266b56562SChristoffer Dall 		goto out;
410366b56562SChristoffer Dall 	}
410427291e21SDavid Hildenbrand 
410527291e21SDavid Hildenbrand 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
410627291e21SDavid Hildenbrand 		vcpu->guest_debug = dbg->control;
410727291e21SDavid Hildenbrand 		/* enforce guest PER */
4108ef8f4f49SDavid Hildenbrand 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
410927291e21SDavid Hildenbrand 
411027291e21SDavid Hildenbrand 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
411127291e21SDavid Hildenbrand 			rc = kvm_s390_import_bp_data(vcpu, dbg);
411227291e21SDavid Hildenbrand 	} else {
41139daecfc6SDavid Hildenbrand 		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
411427291e21SDavid Hildenbrand 		vcpu->arch.guestdbg.last_bp = 0;
411527291e21SDavid Hildenbrand 	}
411627291e21SDavid Hildenbrand 
411727291e21SDavid Hildenbrand 	if (rc) {
411827291e21SDavid Hildenbrand 		vcpu->guest_debug = 0;
411927291e21SDavid Hildenbrand 		kvm_s390_clear_bp_data(vcpu);
41209daecfc6SDavid Hildenbrand 		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
412127291e21SDavid Hildenbrand 	}
412227291e21SDavid Hildenbrand 
412366b56562SChristoffer Dall out:
412466b56562SChristoffer Dall 	vcpu_put(vcpu);
412527291e21SDavid Hildenbrand 	return rc;
4126b0c632dbSHeiko Carstens }
4127b0c632dbSHeiko Carstens 
412862d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
412962d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
413062d9f0dbSMarcelo Tosatti {
4131fd232561SChristoffer Dall 	int ret;
4132fd232561SChristoffer Dall 
4133fd232561SChristoffer Dall 	vcpu_load(vcpu);
4134fd232561SChristoffer Dall 
41356352e4d2SDavid Hildenbrand 	/* CHECK_STOP and LOAD are not supported yet */
4136fd232561SChristoffer Dall 	ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
41376352e4d2SDavid Hildenbrand 				      KVM_MP_STATE_OPERATING;
4138fd232561SChristoffer Dall 
4139fd232561SChristoffer Dall 	vcpu_put(vcpu);
4140fd232561SChristoffer Dall 	return ret;
414162d9f0dbSMarcelo Tosatti }
414262d9f0dbSMarcelo Tosatti 
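/*
 * KVM_SET_MP_STATE: merely using this interface switches the VM to
 * user-controlled cpu state handling. STOPPED and OPERATING map to vcpu
 * stop/start, LOAD is only valid for protected guests, and CHECK_STOP is
 * not supported. A sketch of the userspace side (vcpu_fd assumed):
 *
 *	struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_OPERATING };
 *	ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
 */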
414362d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
414462d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
414562d9f0dbSMarcelo Tosatti {
41466352e4d2SDavid Hildenbrand 	int rc = 0;
41476352e4d2SDavid Hildenbrand 
4148e83dff5eSChristoffer Dall 	vcpu_load(vcpu);
4149e83dff5eSChristoffer Dall 
41506352e4d2SDavid Hildenbrand 	/* user space knows about this interface - let it control the state */
415167cf68b6SEric Farman 	kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm);
41526352e4d2SDavid Hildenbrand 
41536352e4d2SDavid Hildenbrand 	switch (mp_state->mp_state) {
41546352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_STOPPED:
4155fe28c786SJanosch Frank 		rc = kvm_s390_vcpu_stop(vcpu);
41566352e4d2SDavid Hildenbrand 		break;
41576352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_OPERATING:
4158fe28c786SJanosch Frank 		rc = kvm_s390_vcpu_start(vcpu);
41596352e4d2SDavid Hildenbrand 		break;
41606352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_LOAD:
41617c36a3fcSJanosch Frank 		if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
41627c36a3fcSJanosch Frank 			rc = -ENXIO;
41637c36a3fcSJanosch Frank 			break;
41647c36a3fcSJanosch Frank 		}
41657c36a3fcSJanosch Frank 		rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
41667c36a3fcSJanosch Frank 		break;
41676352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_CHECK_STOP:
41683b684a42SJoe Perches 		fallthrough;	/* CHECK_STOP and LOAD are not supported yet */
41696352e4d2SDavid Hildenbrand 	default:
41706352e4d2SDavid Hildenbrand 		rc = -ENXIO;
41716352e4d2SDavid Hildenbrand 	}
41726352e4d2SDavid Hildenbrand 
4173e83dff5eSChristoffer Dall 	vcpu_put(vcpu);
41746352e4d2SDavid Hildenbrand 	return rc;
417562d9f0dbSMarcelo Tosatti }
417662d9f0dbSMarcelo Tosatti 
41778ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu)
41788ad35755SDavid Hildenbrand {
41798d5fb0dcSDavid Hildenbrand 	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
41808ad35755SDavid Hildenbrand }
41818ad35755SDavid Hildenbrand 
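/*
 * Process all requests that were raised for this vcpu while it was outside
 * SIE. Every handled request restarts the loop from the top so that
 * requests raised in the meantime are not missed.
 */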
41822c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
41832c70fe44SChristian Borntraeger {
41848ad35755SDavid Hildenbrand retry:
41858e236546SChristian Borntraeger 	kvm_s390_vcpu_request_handled(vcpu);
41862fa6e1e1SRadim Krčmář 	if (!kvm_request_pending(vcpu))
4187586b7ccdSChristian Borntraeger 		return 0;
41882c70fe44SChristian Borntraeger 	/*
4189cc65c3a1SSean Christopherson 	 * If the guest prefix changed, re-arm the ipte notifier for the
4190b2d73b2aSMartin Schwidefsky 	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
41912c70fe44SChristian Borntraeger 	 * This ensures that the ipte instruction for this request has
41922c70fe44SChristian Borntraeger 	 * already finished. We might race against a second unmapper that
41932c70fe44SChristian Borntraeger 	 * wants to set the blocking bit. Let's just retry the request loop.
41942c70fe44SChristian Borntraeger 	 */
4195cc65c3a1SSean Christopherson 	if (kvm_check_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu)) {
41962c70fe44SChristian Borntraeger 		int rc;
4197b2d73b2aSMartin Schwidefsky 		rc = gmap_mprotect_notify(vcpu->arch.gmap,
4198fda902cbSMichael Mueller 					  kvm_s390_get_prefix(vcpu),
4199b2d73b2aSMartin Schwidefsky 					  PAGE_SIZE * 2, PROT_WRITE);
4200aca411a4SJulius Niedworok 		if (rc) {
4201cc65c3a1SSean Christopherson 			kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
42022c70fe44SChristian Borntraeger 			return rc;
4203aca411a4SJulius Niedworok 		}
42048ad35755SDavid Hildenbrand 		goto retry;
42052c70fe44SChristian Borntraeger 	}
42068ad35755SDavid Hildenbrand 
4207d3d692c8SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
4208d3d692c8SDavid Hildenbrand 		vcpu->arch.sie_block->ihcpu = 0xffff;
4209d3d692c8SDavid Hildenbrand 		goto retry;
4210d3d692c8SDavid Hildenbrand 	}
4211d3d692c8SDavid Hildenbrand 
42128ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
42138ad35755SDavid Hildenbrand 		if (!ibs_enabled(vcpu)) {
42148ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
4215ef8f4f49SDavid Hildenbrand 			kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
42168ad35755SDavid Hildenbrand 		}
42178ad35755SDavid Hildenbrand 		goto retry;
42188ad35755SDavid Hildenbrand 	}
42198ad35755SDavid Hildenbrand 
42208ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
42218ad35755SDavid Hildenbrand 		if (ibs_enabled(vcpu)) {
42228ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
42239daecfc6SDavid Hildenbrand 			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
42248ad35755SDavid Hildenbrand 		}
42258ad35755SDavid Hildenbrand 		goto retry;
42268ad35755SDavid Hildenbrand 	}
42278ad35755SDavid Hildenbrand 
42286502a34cSDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
42296502a34cSDavid Hildenbrand 		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
42306502a34cSDavid Hildenbrand 		goto retry;
42316502a34cSDavid Hildenbrand 	}
42326502a34cSDavid Hildenbrand 
4233190df4a2SClaudio Imbrenda 	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
4234190df4a2SClaudio Imbrenda 		/*
4235c9f0a2b8SJanosch Frank 		 * Disable CMM virtualization; we will emulate the ESSA
4236190df4a2SClaudio Imbrenda 		 * instruction manually, in order to provide additional
4237190df4a2SClaudio Imbrenda 		 * functionalities needed for live migration.
4238190df4a2SClaudio Imbrenda 		 */
4239190df4a2SClaudio Imbrenda 		vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
4240190df4a2SClaudio Imbrenda 		goto retry;
4241190df4a2SClaudio Imbrenda 	}
4242190df4a2SClaudio Imbrenda 
4243190df4a2SClaudio Imbrenda 	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
4244190df4a2SClaudio Imbrenda 		/*
4245c9f0a2b8SJanosch Frank 		 * Re-enable CMM virtualization if CMMA is available and
4246c9f0a2b8SJanosch Frank 		 * CMM has been used.
4247190df4a2SClaudio Imbrenda 		 */
4248190df4a2SClaudio Imbrenda 		if ((vcpu->kvm->arch.use_cmma) &&
4249c9f0a2b8SJanosch Frank 		    (vcpu->kvm->mm->context.uses_cmm))
4250190df4a2SClaudio Imbrenda 			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
4251190df4a2SClaudio Imbrenda 		goto retry;
4252190df4a2SClaudio Imbrenda 	}
4253190df4a2SClaudio Imbrenda 
42540759d068SDavid Hildenbrand 	/* nothing to do, just clear the request */
425572875d8aSRadim Krčmář 	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
42563194cdb7SDavid Hildenbrand 	/* we left the vsie handler, nothing to do, just clear the request */
42573194cdb7SDavid Hildenbrand 	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
42580759d068SDavid Hildenbrand 
42592c70fe44SChristian Borntraeger 	return 0;
42602c70fe44SChristian Borntraeger }
42612c70fe44SChristian Borntraeger 
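/*
 * Set the guest TOD clock by recording the delta (epoch, plus the epoch
 * index if the multiple-epoch facility 139 is available) between the
 * requested guest TOD and the current host TOD. All vcpus are blocked
 * while their SIE epoch fields are updated.
 */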
4262c0573ba5SClaudio Imbrenda static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
42638fa1696eSCollin L. Walling {
42648fa1696eSCollin L. Walling 	struct kvm_vcpu *vcpu;
42652cfd7b73SHeiko Carstens 	union tod_clock clk;
426646808a4cSMarc Zyngier 	unsigned long i;
42678fa1696eSCollin L. Walling 
42688fa1696eSCollin L. Walling 	preempt_disable();
42698fa1696eSCollin L. Walling 
42702cfd7b73SHeiko Carstens 	store_tod_clock_ext(&clk);
42718fa1696eSCollin L. Walling 
42722cfd7b73SHeiko Carstens 	kvm->arch.epoch = gtod->tod - clk.tod;
42730e7def5fSDavid Hildenbrand 	kvm->arch.epdx = 0;
42740e7def5fSDavid Hildenbrand 	if (test_kvm_facility(kvm, 139)) {
42752cfd7b73SHeiko Carstens 		kvm->arch.epdx = gtod->epoch_idx - clk.ei;
42768fa1696eSCollin L. Walling 		if (kvm->arch.epoch > gtod->tod)
42778fa1696eSCollin L. Walling 			kvm->arch.epdx -= 1;
42780e7def5fSDavid Hildenbrand 	}
42798fa1696eSCollin L. Walling 
42808fa1696eSCollin L. Walling 	kvm_s390_vcpu_block_all(kvm);
42818fa1696eSCollin L. Walling 	kvm_for_each_vcpu(i, vcpu, kvm) {
42828fa1696eSCollin L. Walling 		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
42838fa1696eSCollin L. Walling 		vcpu->arch.sie_block->epdx  = kvm->arch.epdx;
42848fa1696eSCollin L. Walling 	}
42858fa1696eSCollin L. Walling 
42868fa1696eSCollin L. Walling 	kvm_s390_vcpu_unblock_all(kvm);
42878fa1696eSCollin L. Walling 	preempt_enable();
4288c0573ba5SClaudio Imbrenda }
4289c0573ba5SClaudio Imbrenda 
4290c0573ba5SClaudio Imbrenda void kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4291c0573ba5SClaudio Imbrenda {
4292c0573ba5SClaudio Imbrenda 	mutex_lock(&kvm->lock);
4293c0573ba5SClaudio Imbrenda 	__kvm_s390_set_tod_clock(kvm, gtod);
42948fa1696eSCollin L. Walling 	mutex_unlock(&kvm->lock);
42958fa1696eSCollin L. Walling }
42968fa1696eSCollin L. Walling 
4297c0573ba5SClaudio Imbrenda int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4298c0573ba5SClaudio Imbrenda {
4299c0573ba5SClaudio Imbrenda 	if (!mutex_trylock(&kvm->lock))
4300c0573ba5SClaudio Imbrenda 		return 0;
4301c0573ba5SClaudio Imbrenda 	__kvm_s390_set_tod_clock(kvm, gtod);
4302c0573ba5SClaudio Imbrenda 	mutex_unlock(&kvm->lock);
4303c0573ba5SClaudio Imbrenda 	return 1;
4304c0573ba5SClaudio Imbrenda }
4305c0573ba5SClaudio Imbrenda 
4306fa576c58SThomas Huth /**
4307fa576c58SThomas Huth  * kvm_arch_fault_in_page - fault-in guest page if necessary
4308fa576c58SThomas Huth  * @vcpu: The corresponding virtual cpu
4309fa576c58SThomas Huth  * @gpa: Guest physical address
4310fa576c58SThomas Huth  * @writable: Whether the page should be writable or not
4311fa576c58SThomas Huth  *
4312fa576c58SThomas Huth  * Make sure that a guest page has been faulted-in on the host.
4313fa576c58SThomas Huth  *
4314fa576c58SThomas Huth  * Return: Zero on success, negative error code otherwise.
4315fa576c58SThomas Huth  */
4316fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
431724eb3a82SDominik Dingel {
4318527e30b4SMartin Schwidefsky 	return gmap_fault(vcpu->arch.gmap, gpa,
4319527e30b4SMartin Schwidefsky 			  writable ? FAULT_FLAG_WRITE : 0);
432024eb3a82SDominik Dingel }
432124eb3a82SDominik Dingel 
43223c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
43233c038e6bSDominik Dingel 				      unsigned long token)
43243c038e6bSDominik Dingel {
43253c038e6bSDominik Dingel 	struct kvm_s390_interrupt inti;
4326383d0b05SJens Freimann 	struct kvm_s390_irq irq;
43273c038e6bSDominik Dingel 
43283c038e6bSDominik Dingel 	if (start_token) {
4329383d0b05SJens Freimann 		irq.u.ext.ext_params2 = token;
4330383d0b05SJens Freimann 		irq.type = KVM_S390_INT_PFAULT_INIT;
4331383d0b05SJens Freimann 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
43323c038e6bSDominik Dingel 	} else {
43333c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_DONE;
4334383d0b05SJens Freimann 		inti.parm64 = token;
43353c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
43363c038e6bSDominik Dingel 	}
43373c038e6bSDominik Dingel }
43383c038e6bSDominik Dingel 
43392a18b7e7SVitaly Kuznetsov bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
43403c038e6bSDominik Dingel 				     struct kvm_async_pf *work)
43413c038e6bSDominik Dingel {
43423c038e6bSDominik Dingel 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
43433c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
43442a18b7e7SVitaly Kuznetsov 
43452a18b7e7SVitaly Kuznetsov 	return true;
43463c038e6bSDominik Dingel }
43473c038e6bSDominik Dingel 
43483c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
43493c038e6bSDominik Dingel 				 struct kvm_async_pf *work)
43503c038e6bSDominik Dingel {
43513c038e6bSDominik Dingel 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
43523c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
43533c038e6bSDominik Dingel }
43543c038e6bSDominik Dingel 
43553c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
43563c038e6bSDominik Dingel 			       struct kvm_async_pf *work)
43573c038e6bSDominik Dingel {
43583c038e6bSDominik Dingel 	/* s390 will always inject the page directly */
43593c038e6bSDominik Dingel }
43603c038e6bSDominik Dingel 
43617c0ade6cSVitaly Kuznetsov bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
43623c038e6bSDominik Dingel {
43633c038e6bSDominik Dingel 	/*
43643c038e6bSDominik Dingel 	 * s390 will always inject the page directly,
43653c038e6bSDominik Dingel 	 * but we still want check_async_completion to clean up
43663c038e6bSDominik Dingel 	 */
43673c038e6bSDominik Dingel 	return true;
43683c038e6bSDominik Dingel }
43693c038e6bSDominik Dingel 
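/*
 * Decide whether the current host page fault can be handled asynchronously
 * via the pfault interface: a valid token must be set, the PSW must match
 * the configured select/compare masks, external interrupts and the
 * service-signal subclass must be enabled, no interrupt may be pending and
 * pfault must be enabled for the gmap.
 */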
4370e8c22266SVitaly Kuznetsov static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
43713c038e6bSDominik Dingel {
43723c038e6bSDominik Dingel 	hva_t hva;
43733c038e6bSDominik Dingel 	struct kvm_arch_async_pf arch;
43743c038e6bSDominik Dingel 
43753c038e6bSDominik Dingel 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4376e8c22266SVitaly Kuznetsov 		return false;
43773c038e6bSDominik Dingel 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
43783c038e6bSDominik Dingel 	    vcpu->arch.pfault_compare)
4379e8c22266SVitaly Kuznetsov 		return false;
43803c038e6bSDominik Dingel 	if (psw_extint_disabled(vcpu))
4381e8c22266SVitaly Kuznetsov 		return false;
43829a022067SDavid Hildenbrand 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
4383e8c22266SVitaly Kuznetsov 		return false;
4384b9224cd7SDavid Hildenbrand 	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
4385e8c22266SVitaly Kuznetsov 		return false;
43863c038e6bSDominik Dingel 	if (!vcpu->arch.gmap->pfault_enabled)
4387e8c22266SVitaly Kuznetsov 		return false;
43883c038e6bSDominik Dingel 
438981480cc1SHeiko Carstens 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
439081480cc1SHeiko Carstens 	hva += current->thread.gmap_addr & ~PAGE_MASK;
439181480cc1SHeiko Carstens 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
4392e8c22266SVitaly Kuznetsov 		return false;
43933c038e6bSDominik Dingel 
4394e8c22266SVitaly Kuznetsov 	return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
43953c038e6bSDominik Dingel }
43963c038e6bSDominik Dingel 
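/*
 * Prepare a vcpu for the next SIE entry: complete pending pfaults, deliver
 * pending interrupts, process vcpu requests and set up PER backup/patching
 * when guest debugging is active.
 */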
43973fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu)
4398b0c632dbSHeiko Carstens {
43993fb4c40fSThomas Huth 	int rc, cpuflags;
4400e168bf8dSCarsten Otte 
44013c038e6bSDominik Dingel 	/*
44023c038e6bSDominik Dingel 	 * On s390 notifications for arriving pages will be delivered directly
44033c038e6bSDominik Dingel 	 * to the guest but the housekeeping for completed pfaults is
44043c038e6bSDominik Dingel 	 * handled outside the worker.
44053c038e6bSDominik Dingel 	 */
44063c038e6bSDominik Dingel 	kvm_check_async_pf_completion(vcpu);
44073c038e6bSDominik Dingel 
44087ec7c8c7SChristian Borntraeger 	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
44097ec7c8c7SChristian Borntraeger 	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
4410b0c632dbSHeiko Carstens 
4411b0c632dbSHeiko Carstens 	if (need_resched())
4412b0c632dbSHeiko Carstens 		schedule();
4413b0c632dbSHeiko Carstens 
441479395031SJens Freimann 	if (!kvm_is_ucontrol(vcpu->kvm)) {
441579395031SJens Freimann 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
441679395031SJens Freimann 		if (rc)
441779395031SJens Freimann 			return rc;
441879395031SJens Freimann 	}
44190ff31867SCarsten Otte 
44202c70fe44SChristian Borntraeger 	rc = kvm_s390_handle_requests(vcpu);
44212c70fe44SChristian Borntraeger 	if (rc)
44222c70fe44SChristian Borntraeger 		return rc;
44232c70fe44SChristian Borntraeger 
442427291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu)) {
442527291e21SDavid Hildenbrand 		kvm_s390_backup_guest_per_regs(vcpu);
442627291e21SDavid Hildenbrand 		kvm_s390_patch_guest_per_regs(vcpu);
442727291e21SDavid Hildenbrand 	}
442827291e21SDavid Hildenbrand 
44294eeef242SSean Christopherson 	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
44309f30f621SMichael Mueller 
4431b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icptcode = 0;
44323fb4c40fSThomas Huth 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
44333fb4c40fSThomas Huth 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
44343fb4c40fSThomas Huth 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
44352b29a9fdSDominik Dingel 
44363fb4c40fSThomas Huth 	return 0;
44373fb4c40fSThomas Huth }
44383fb4c40fSThomas Huth 
4439492d8642SThomas Huth static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
4440492d8642SThomas Huth {
444156317920SDavid Hildenbrand 	struct kvm_s390_pgm_info pgm_info = {
444256317920SDavid Hildenbrand 		.code = PGM_ADDRESSING,
444356317920SDavid Hildenbrand 	};
444456317920SDavid Hildenbrand 	u8 opcode, ilen;
4445492d8642SThomas Huth 	int rc;
4446492d8642SThomas Huth 
4447492d8642SThomas Huth 	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
4448492d8642SThomas Huth 	trace_kvm_s390_sie_fault(vcpu);
4449492d8642SThomas Huth 
4450492d8642SThomas Huth 	/*
4451492d8642SThomas Huth 	 * We want to inject an addressing exception, which is defined as a
4452492d8642SThomas Huth 	 * suppressing or terminating exception. However, since we came here
4453492d8642SThomas Huth 	 * by a DAT access exception, the PSW still points to the faulting
4454492d8642SThomas Huth 	 * instruction since DAT exceptions are nullifying. So we've got
4455492d8642SThomas Huth 	 * to look up the current opcode to get the length of the instruction
4456492d8642SThomas Huth 	 * to be able to forward the PSW.
4457492d8642SThomas Huth 	 */
44583fa8cad7SDavid Hildenbrand 	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
445956317920SDavid Hildenbrand 	ilen = insn_length(opcode);
44609b0d721aSDavid Hildenbrand 	if (rc < 0) {
44619b0d721aSDavid Hildenbrand 		return rc;
44629b0d721aSDavid Hildenbrand 	} else if (rc) {
44639b0d721aSDavid Hildenbrand 		/* Instruction-Fetching Exceptions - we can't detect the ilen.
44649b0d721aSDavid Hildenbrand 		 * Forward by arbitrary ilc, injection will take care of
44659b0d721aSDavid Hildenbrand 		 * nullification if necessary.
44669b0d721aSDavid Hildenbrand 		 */
44679b0d721aSDavid Hildenbrand 		pgm_info = vcpu->arch.pgm;
44689b0d721aSDavid Hildenbrand 		ilen = 4;
44699b0d721aSDavid Hildenbrand 	}
447056317920SDavid Hildenbrand 	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
447156317920SDavid Hildenbrand 	kvm_s390_forward_psw(vcpu, ilen);
447256317920SDavid Hildenbrand 	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
4473492d8642SThomas Huth }
4474492d8642SThomas Huth 
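/*
 * Post-process a SIE exit: -EINTR signals a host machine check that must
 * be reinjected, a non-zero intercept code is handled in the kernel or
 * forwarded to userspace as KVM_EXIT_S390_SIEIC, ucontrol faults are
 * reported as KVM_EXIT_S390_UCONTROL, and guest page faults are resolved
 * either asynchronously or synchronously.
 */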
44753fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
44763fb4c40fSThomas Huth {
44774d62fcc0SQingFeng Hao 	struct mcck_volatile_info *mcck_info;
44784d62fcc0SQingFeng Hao 	struct sie_page *sie_page;
44794d62fcc0SQingFeng Hao 
44802b29a9fdSDominik Dingel 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
44812b29a9fdSDominik Dingel 		   vcpu->arch.sie_block->icptcode);
44822b29a9fdSDominik Dingel 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
44832b29a9fdSDominik Dingel 
448427291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu))
448527291e21SDavid Hildenbrand 		kvm_s390_restore_guest_per_regs(vcpu);
448627291e21SDavid Hildenbrand 
44877ec7c8c7SChristian Borntraeger 	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
44887ec7c8c7SChristian Borntraeger 	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
448971f116bfSDavid Hildenbrand 
44904d62fcc0SQingFeng Hao 	if (exit_reason == -EINTR) {
44914d62fcc0SQingFeng Hao 		VCPU_EVENT(vcpu, 3, "%s", "machine check");
44924d62fcc0SQingFeng Hao 		sie_page = container_of(vcpu->arch.sie_block,
44934d62fcc0SQingFeng Hao 					struct sie_page, sie_block);
44944d62fcc0SQingFeng Hao 		mcck_info = &sie_page->mcck_info;
44954d62fcc0SQingFeng Hao 		kvm_s390_reinject_machine_check(vcpu, mcck_info);
44964d62fcc0SQingFeng Hao 		return 0;
44974d62fcc0SQingFeng Hao 	}
44984d62fcc0SQingFeng Hao 
449971f116bfSDavid Hildenbrand 	if (vcpu->arch.sie_block->icptcode > 0) {
450071f116bfSDavid Hildenbrand 		int rc = kvm_handle_sie_intercept(vcpu);
450171f116bfSDavid Hildenbrand 
450271f116bfSDavid Hildenbrand 		if (rc != -EOPNOTSUPP)
450371f116bfSDavid Hildenbrand 			return rc;
450471f116bfSDavid Hildenbrand 		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
450571f116bfSDavid Hildenbrand 		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
450671f116bfSDavid Hildenbrand 		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
450771f116bfSDavid Hildenbrand 		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
450871f116bfSDavid Hildenbrand 		return -EREMOTE;
450971f116bfSDavid Hildenbrand 	} else if (exit_reason != -EFAULT) {
451071f116bfSDavid Hildenbrand 		vcpu->stat.exit_null++;
451171f116bfSDavid Hildenbrand 		return 0;
4512210b1607SThomas Huth 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
4513210b1607SThomas Huth 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
4514210b1607SThomas Huth 		vcpu->run->s390_ucontrol.trans_exc_code =
4515210b1607SThomas Huth 						current->thread.gmap_addr;
4516210b1607SThomas Huth 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
451771f116bfSDavid Hildenbrand 		return -EREMOTE;
451824eb3a82SDominik Dingel 	} else if (current->thread.gmap_pfault) {
45193c038e6bSDominik Dingel 		trace_kvm_s390_major_guest_pfault(vcpu);
452024eb3a82SDominik Dingel 		current->thread.gmap_pfault = 0;
452171f116bfSDavid Hildenbrand 		if (kvm_arch_setup_async_pf(vcpu))
452271f116bfSDavid Hildenbrand 			return 0;
452350a05be4SChristian Borntraeger 		vcpu->stat.pfault_sync++;
452471f116bfSDavid Hildenbrand 		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
4525fa576c58SThomas Huth 	}
452671f116bfSDavid Hildenbrand 	return vcpu_post_run_fault_in_sie(vcpu);
45273fb4c40fSThomas Huth }
45283fb4c40fSThomas Huth 
45293adae0b4SJanosch Frank #define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
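/*
 * The main run loop: run vcpu_pre_run(), drop the srcu lock, enter SIE
 * with interrupts disabled (copying the gprs through the sie_page for
 * protected guests), then re-acquire srcu and let vcpu_post_run() decide
 * whether to loop again or return to userspace.
 */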
45303fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu)
45313fb4c40fSThomas Huth {
45323fb4c40fSThomas Huth 	int rc, exit_reason;
4533c8aac234SJanosch Frank 	struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
45343fb4c40fSThomas Huth 
4535800c1065SThomas Huth 	/*
4536800c1065SThomas Huth 	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
4537800c1065SThomas Huth 	 * ning the guest), so that memslots (and other stuff) are protected
4538800c1065SThomas Huth 	 */
45392031f287SSean Christopherson 	kvm_vcpu_srcu_read_lock(vcpu);
4540800c1065SThomas Huth 
4541a76ccff6SThomas Huth 	do {
45423fb4c40fSThomas Huth 		rc = vcpu_pre_run(vcpu);
45433fb4c40fSThomas Huth 		if (rc)
4544a76ccff6SThomas Huth 			break;
45453fb4c40fSThomas Huth 
45462031f287SSean Christopherson 		kvm_vcpu_srcu_read_unlock(vcpu);
45473fb4c40fSThomas Huth 		/*
4548a76ccff6SThomas Huth 		 * As PF_VCPU will be used in the fault handler, there must be
4549a76ccff6SThomas Huth 		 * no uaccess between guest_enter and guest_exit.
45503fb4c40fSThomas Huth 		 */
45510097d12eSChristian Borntraeger 		local_irq_disable();
45526edaa530SPaolo Bonzini 		guest_enter_irqoff();
4553db0758b2SDavid Hildenbrand 		__disable_cpu_timer_accounting(vcpu);
45540097d12eSChristian Borntraeger 		local_irq_enable();
4555c8aac234SJanosch Frank 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4556c8aac234SJanosch Frank 			memcpy(sie_page->pv_grregs,
4557c8aac234SJanosch Frank 			       vcpu->run->s.regs.gprs,
4558c8aac234SJanosch Frank 			       sizeof(sie_page->pv_grregs));
4559c8aac234SJanosch Frank 		}
456056e62a73SSven Schnelle 		if (test_cpu_flag(CIF_FPU))
456156e62a73SSven Schnelle 			load_fpu_regs();
4562a76ccff6SThomas Huth 		exit_reason = sie64a(vcpu->arch.sie_block,
4563a76ccff6SThomas Huth 				     vcpu->run->s.regs.gprs);
4564c8aac234SJanosch Frank 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4565c8aac234SJanosch Frank 			memcpy(vcpu->run->s.regs.gprs,
4566c8aac234SJanosch Frank 			       sie_page->pv_grregs,
4567c8aac234SJanosch Frank 			       sizeof(sie_page->pv_grregs));
45683adae0b4SJanosch Frank 			/*
45693adae0b4SJanosch Frank 			 * We're not allowed to inject interrupts on intercepts
45703adae0b4SJanosch Frank 			 * that leave the guest state in an "in-between" state
45713adae0b4SJanosch Frank 			 * where the next SIE entry will do a continuation.
45723adae0b4SJanosch Frank 			 * Fence interrupts in our "internal" PSW.
45733adae0b4SJanosch Frank 			 */
45743adae0b4SJanosch Frank 			if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
45753adae0b4SJanosch Frank 			    vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
45763adae0b4SJanosch Frank 				vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
45773adae0b4SJanosch Frank 			}
4578c8aac234SJanosch Frank 		}
45790097d12eSChristian Borntraeger 		local_irq_disable();
4580db0758b2SDavid Hildenbrand 		__enable_cpu_timer_accounting(vcpu);
45816edaa530SPaolo Bonzini 		guest_exit_irqoff();
45820097d12eSChristian Borntraeger 		local_irq_enable();
45832031f287SSean Christopherson 		kvm_vcpu_srcu_read_lock(vcpu);
45843fb4c40fSThomas Huth 
45853fb4c40fSThomas Huth 		rc = vcpu_post_run(vcpu, exit_reason);
458627291e21SDavid Hildenbrand 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
45873fb4c40fSThomas Huth 
45882031f287SSean Christopherson 	kvm_vcpu_srcu_read_unlock(vcpu);
4589e168bf8dSCarsten Otte 	return rc;
4590b0c632dbSHeiko Carstens }
4591b0c632dbSHeiko Carstens 
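/*
 * Transfer the format-2 (non-protected) parts of the kvm_run synced
 * register area into the SIE control block, including lazy enablement of
 * runtime instrumentation and guarded storage when userspace hands in
 * valid control blocks.
 */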
45922f0a83beSTianjia Zhang static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
4593b028ee3eSDavid Hildenbrand {
45942f0a83beSTianjia Zhang 	struct kvm_run *kvm_run = vcpu->run;
45954d5f2c04SChristian Borntraeger 	struct runtime_instr_cb *riccb;
45964e0b1ab7SFan Zhang 	struct gs_cb *gscb;
45974d5f2c04SChristian Borntraeger 
45984d5f2c04SChristian Borntraeger 	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
45994e0b1ab7SFan Zhang 	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
4600b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
4601b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
4602b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4603b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
4604b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
4605b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
4606b028ee3eSDavid Hildenbrand 	}
4607b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
4608b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
4609b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
4610b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
46119fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
46129fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
4613b028ee3eSDavid Hildenbrand 	}
461423a60f83SCollin Walling 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
461523a60f83SCollin Walling 		vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
461623a60f83SCollin Walling 		vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
46173fd8417fSCollin Walling 		VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc);
461823a60f83SCollin Walling 	}
461980cd8763SFan Zhang 	/*
462080cd8763SFan Zhang 	 * If userspace sets the riccb (e.g. after migration) to a valid state,
462180cd8763SFan Zhang 	 * we should enable RI here instead of doing the lazy enablement.
462280cd8763SFan Zhang 	 */
462380cd8763SFan Zhang 	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
46244d5f2c04SChristian Borntraeger 	    test_kvm_facility(vcpu->kvm, 64) &&
4625bb59c2daSAlice Frosi 	    riccb->v &&
46260c9d8683SDavid Hildenbrand 	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
46274d5f2c04SChristian Borntraeger 		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
46280c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
462980cd8763SFan Zhang 	}
46304e0b1ab7SFan Zhang 	/*
46314e0b1ab7SFan Zhang 	 * If userspace sets the gscb (e.g. after migration) to non-zero,
46324e0b1ab7SFan Zhang 	 * we should enable GS here instead of doing the lazy enablement.
46334e0b1ab7SFan Zhang 	 */
46344e0b1ab7SFan Zhang 	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
46354e0b1ab7SFan Zhang 	    test_kvm_facility(vcpu->kvm, 133) &&
46364e0b1ab7SFan Zhang 	    gscb->gssm &&
46374e0b1ab7SFan Zhang 	    !vcpu->arch.gs_enabled) {
46384e0b1ab7SFan Zhang 		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
46394e0b1ab7SFan Zhang 		vcpu->arch.sie_block->ecb |= ECB_GS;
46404e0b1ab7SFan Zhang 		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
46414e0b1ab7SFan Zhang 		vcpu->arch.gs_enabled = 1;
464280cd8763SFan Zhang 	}
464335b3fde6SChristian Borntraeger 	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
464435b3fde6SChristian Borntraeger 	    test_kvm_facility(vcpu->kvm, 82)) {
464535b3fde6SChristian Borntraeger 		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
464635b3fde6SChristian Borntraeger 		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
464735b3fde6SChristian Borntraeger 	}
46484e0b1ab7SFan Zhang 	if (MACHINE_HAS_GS) {
46494e0b1ab7SFan Zhang 		preempt_disable();
46504e0b1ab7SFan Zhang 		__ctl_set_bit(2, 4);
46514e0b1ab7SFan Zhang 		if (current->thread.gs_cb) {
46524e0b1ab7SFan Zhang 			vcpu->arch.host_gscb = current->thread.gs_cb;
46534e0b1ab7SFan Zhang 			save_gs_cb(vcpu->arch.host_gscb);
46544e0b1ab7SFan Zhang 		}
46554e0b1ab7SFan Zhang 		if (vcpu->arch.gs_enabled) {
46564e0b1ab7SFan Zhang 			current->thread.gs_cb = (struct gs_cb *)
46574e0b1ab7SFan Zhang 						&vcpu->run->s.regs.gscb;
46584e0b1ab7SFan Zhang 			restore_gs_cb(current->thread.gs_cb);
46594e0b1ab7SFan Zhang 		}
46604e0b1ab7SFan Zhang 		preempt_enable();
46614e0b1ab7SFan Zhang 	}
4662a3da7b4aSChristian Borntraeger 	/* SIE will load etoken directly from SDNX and therefore kvm_run */
4663811ea797SJanosch Frank }
4664811ea797SJanosch Frank 
46652f0a83beSTianjia Zhang static void sync_regs(struct kvm_vcpu *vcpu)
4666811ea797SJanosch Frank {
46672f0a83beSTianjia Zhang 	struct kvm_run *kvm_run = vcpu->run;
46682f0a83beSTianjia Zhang 
4669811ea797SJanosch Frank 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
4670811ea797SJanosch Frank 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
4671811ea797SJanosch Frank 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
4672811ea797SJanosch Frank 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
4673811ea797SJanosch Frank 		/* some control register changes require a tlb flush */
4674811ea797SJanosch Frank 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4675811ea797SJanosch Frank 	}
4676811ea797SJanosch Frank 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4677811ea797SJanosch Frank 		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
4678811ea797SJanosch Frank 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
4679811ea797SJanosch Frank 	}
4680811ea797SJanosch Frank 	save_access_regs(vcpu->arch.host_acrs);
4681811ea797SJanosch Frank 	restore_access_regs(vcpu->run->s.regs.acrs);
4682811ea797SJanosch Frank 	/* save host (userspace) fprs/vrs */
4683811ea797SJanosch Frank 	save_fpu_regs();
4684811ea797SJanosch Frank 	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
4685811ea797SJanosch Frank 	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
4686811ea797SJanosch Frank 	if (MACHINE_HAS_VX)
4687811ea797SJanosch Frank 		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
4688811ea797SJanosch Frank 	else
4689811ea797SJanosch Frank 		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
4690811ea797SJanosch Frank 	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
4691811ea797SJanosch Frank 	if (test_fp_ctl(current->thread.fpu.fpc))
4692811ea797SJanosch Frank 		/* User space provided an invalid FPC, let's clear it */
4693811ea797SJanosch Frank 		current->thread.fpu.fpc = 0;
4694811ea797SJanosch Frank 
4695811ea797SJanosch Frank 	/* Sync fmt2 only data */
4696811ea797SJanosch Frank 	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
46972f0a83beSTianjia Zhang 		sync_regs_fmt2(vcpu);
4698811ea797SJanosch Frank 	} else {
4699811ea797SJanosch Frank 		/*
4700811ea797SJanosch Frank 		 * In several places we have to modify our internal view to
4701811ea797SJanosch Frank 		 * not do things that are disallowed by the ultravisor. For
4702811ea797SJanosch Frank 		 * example we must not inject interrupts after specific exits
4703811ea797SJanosch Frank 		 * (e.g. 112 prefix page not secure). We do this by turning
4704811ea797SJanosch Frank 		 * off the machine check, external and I/O interrupt bits
4705811ea797SJanosch Frank 		 * of our PSW copy. To avoid getting validity intercepts, we
4706811ea797SJanosch Frank 		 * only accept the condition code from userspace.
4707811ea797SJanosch Frank 		 */
4708811ea797SJanosch Frank 		vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
4709811ea797SJanosch Frank 		vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
4710811ea797SJanosch Frank 						   PSW_MASK_CC;
4711811ea797SJanosch Frank 	}
471280cd8763SFan Zhang 
4713b028ee3eSDavid Hildenbrand 	kvm_run->kvm_dirty_regs = 0;
4714b028ee3eSDavid Hildenbrand }
4715b028ee3eSDavid Hildenbrand 
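/*
 * Mirror of sync_regs_fmt2(): copy the format-2 parts of the guest state
 * back into the kvm_run synced register area and restore the host guarded
 * storage control block.
 */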
47162f0a83beSTianjia Zhang static void store_regs_fmt2(struct kvm_vcpu *vcpu)
4717b028ee3eSDavid Hildenbrand {
47182f0a83beSTianjia Zhang 	struct kvm_run *kvm_run = vcpu->run;
47192f0a83beSTianjia Zhang 
4720b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
4721b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
4722b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
472335b3fde6SChristian Borntraeger 	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
472423a60f83SCollin Walling 	kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
47254e0b1ab7SFan Zhang 	if (MACHINE_HAS_GS) {
472644bada28SHeiko Carstens 		preempt_disable();
47274e0b1ab7SFan Zhang 		__ctl_set_bit(2, 4);
47284e0b1ab7SFan Zhang 		if (vcpu->arch.gs_enabled)
47294e0b1ab7SFan Zhang 			save_gs_cb(current->thread.gs_cb);
47304e0b1ab7SFan Zhang 		current->thread.gs_cb = vcpu->arch.host_gscb;
47314e0b1ab7SFan Zhang 		restore_gs_cb(vcpu->arch.host_gscb);
47324e0b1ab7SFan Zhang 		if (!vcpu->arch.host_gscb)
47334e0b1ab7SFan Zhang 			__ctl_clear_bit(2, 4);
47344e0b1ab7SFan Zhang 		vcpu->arch.host_gscb = NULL;
473544bada28SHeiko Carstens 		preempt_enable();
47364e0b1ab7SFan Zhang 	}
4737a3da7b4aSChristian Borntraeger 	/* SIE will save etoken directly into SDNX and therefore kvm_run */
4738b028ee3eSDavid Hildenbrand }
4739b028ee3eSDavid Hildenbrand 
47402f0a83beSTianjia Zhang static void store_regs(struct kvm_vcpu *vcpu)
4741811ea797SJanosch Frank {
47422f0a83beSTianjia Zhang 	struct kvm_run *kvm_run = vcpu->run;
47432f0a83beSTianjia Zhang 
4744811ea797SJanosch Frank 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
4745811ea797SJanosch Frank 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
4746811ea797SJanosch Frank 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
4747811ea797SJanosch Frank 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
4748811ea797SJanosch Frank 	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
4749811ea797SJanosch Frank 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
4750811ea797SJanosch Frank 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
4751811ea797SJanosch Frank 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
4752811ea797SJanosch Frank 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
4753811ea797SJanosch Frank 	save_access_regs(vcpu->run->s.regs.acrs);
4754811ea797SJanosch Frank 	restore_access_regs(vcpu->arch.host_acrs);
4755811ea797SJanosch Frank 	/* Save guest register state */
4756811ea797SJanosch Frank 	save_fpu_regs();
4757811ea797SJanosch Frank 	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
4758811ea797SJanosch Frank 	/* Restore will be done lazily at return */
4759811ea797SJanosch Frank 	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
4760811ea797SJanosch Frank 	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
4761811ea797SJanosch Frank 	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
47622f0a83beSTianjia Zhang 		store_regs_fmt2(vcpu);
4763811ea797SJanosch Frank }
4764811ea797SJanosch Frank 
47651b94f6f8STianjia Zhang int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
4766b0c632dbSHeiko Carstens {
47671b94f6f8STianjia Zhang 	struct kvm_run *kvm_run = vcpu->run;
47688f2abe6aSChristian Borntraeger 	int rc;
4769b0c632dbSHeiko Carstens 
47700460eb35SJanosch Frank 	/*
47710460eb35SJanosch Frank 	 * Running a VM while dumping always has the potential to
47720460eb35SJanosch Frank 	 * produce inconsistent dump data. But for PV vcpus a SIE
47730460eb35SJanosch Frank 	 * entry while dumping could also lead to a fatal validity
47740460eb35SJanosch Frank 	 * intercept which we absolutely want to avoid.
47750460eb35SJanosch Frank 	 */
47760460eb35SJanosch Frank 	if (vcpu->kvm->arch.pv.dumping)
47770460eb35SJanosch Frank 		return -EINVAL;
47780460eb35SJanosch Frank 
4779460df4c1SPaolo Bonzini 	if (kvm_run->immediate_exit)
4780460df4c1SPaolo Bonzini 		return -EINTR;
4781460df4c1SPaolo Bonzini 
4782200824f5SThomas Huth 	if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
4783200824f5SThomas Huth 	    kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
4784200824f5SThomas Huth 		return -EINVAL;
4785200824f5SThomas Huth 
4786accb757dSChristoffer Dall 	vcpu_load(vcpu);
4787accb757dSChristoffer Dall 
478827291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu)) {
478927291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
4790accb757dSChristoffer Dall 		rc = 0;
4791accb757dSChristoffer Dall 		goto out;
479227291e21SDavid Hildenbrand 	}
479327291e21SDavid Hildenbrand 
479420b7035cSJan H. Schönherr 	kvm_sigset_activate(vcpu);
4795b0c632dbSHeiko Carstens 
4796fe28c786SJanosch Frank 	/*
4797fe28c786SJanosch Frank 	 * no need to check the return value of vcpu_start as it can only have
4798fe28c786SJanosch Frank 	 * an error for protvirt, but protvirt means user cpu state
4799fe28c786SJanosch Frank 	 */
48006352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
48016852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
48026352e4d2SDavid Hildenbrand 	} else if (is_vcpu_stopped(vcpu)) {
4803ea2cdd27SDavid Hildenbrand 		pr_err_ratelimited("can't run stopped vcpu %d\n",
48046352e4d2SDavid Hildenbrand 				   vcpu->vcpu_id);
4805accb757dSChristoffer Dall 		rc = -EINVAL;
4806accb757dSChristoffer Dall 		goto out;
48076352e4d2SDavid Hildenbrand 	}
4808b0c632dbSHeiko Carstens 
48092f0a83beSTianjia Zhang 	sync_regs(vcpu);
4810db0758b2SDavid Hildenbrand 	enable_cpu_timer_accounting(vcpu);
4811d7b0b5ebSCarsten Otte 
4812dab4079dSHeiko Carstens 	might_fault();
4813e168bf8dSCarsten Otte 	rc = __vcpu_run(vcpu);
48149ace903dSChristian Ehrhardt 
4815b1d16c49SChristian Ehrhardt 	if (signal_pending(current) && !rc) {
4816b1d16c49SChristian Ehrhardt 		kvm_run->exit_reason = KVM_EXIT_INTR;
48178f2abe6aSChristian Borntraeger 		rc = -EINTR;
4818b1d16c49SChristian Ehrhardt 	}
48198f2abe6aSChristian Borntraeger 
482027291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu) && !rc)  {
482127291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
482227291e21SDavid Hildenbrand 		rc = 0;
482327291e21SDavid Hildenbrand 	}
482427291e21SDavid Hildenbrand 
48258f2abe6aSChristian Borntraeger 	if (rc == -EREMOTE) {
482671f116bfSDavid Hildenbrand 		/* userspace support is needed, kvm_run has been prepared */
48278f2abe6aSChristian Borntraeger 		rc = 0;
48288f2abe6aSChristian Borntraeger 	}
48298f2abe6aSChristian Borntraeger 
4830db0758b2SDavid Hildenbrand 	disable_cpu_timer_accounting(vcpu);
48312f0a83beSTianjia Zhang 	store_regs(vcpu);
4832d7b0b5ebSCarsten Otte 
483320b7035cSJan H. Schönherr 	kvm_sigset_deactivate(vcpu);
4834b0c632dbSHeiko Carstens 
4835b0c632dbSHeiko Carstens 	vcpu->stat.exit_userspace++;
4836accb757dSChristoffer Dall out:
4837accb757dSChristoffer Dall 	vcpu_put(vcpu);
48387e8e6ab4SHeiko Carstens 	return rc;
4839b0c632dbSHeiko Carstens }
4840b0c632dbSHeiko Carstens 
4841b0c632dbSHeiko Carstens /*
4842b0c632dbSHeiko Carstens  * store status at address
4843b0c632dbSHeiko Carstens  * we have two special cases:
4844b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
4845b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
4846b0c632dbSHeiko Carstens  */
4847d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
4848b0c632dbSHeiko Carstens {
4849092670cdSCarsten Otte 	unsigned char archmode = 1;
48509abc2a08SDavid Hildenbrand 	freg_t fprs[NUM_FPRS];
4851fda902cbSMichael Mueller 	unsigned int px;
48524287f247SDavid Hildenbrand 	u64 clkcomp, cputm;
4853d0bce605SHeiko Carstens 	int rc;
4854b0c632dbSHeiko Carstens 
4855d9a3a09aSMartin Schwidefsky 	px = kvm_s390_get_prefix(vcpu);
4856d0bce605SHeiko Carstens 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
4857d0bce605SHeiko Carstens 		if (write_guest_abs(vcpu, 163, &archmode, 1))
4858b0c632dbSHeiko Carstens 			return -EFAULT;
4859d9a3a09aSMartin Schwidefsky 		gpa = 0;
4860d0bce605SHeiko Carstens 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
4861d0bce605SHeiko Carstens 		if (write_guest_real(vcpu, 163, &archmode, 1))
4862b0c632dbSHeiko Carstens 			return -EFAULT;
4863d9a3a09aSMartin Schwidefsky 		gpa = px;
4864d9a3a09aSMartin Schwidefsky 	} else
4865d9a3a09aSMartin Schwidefsky 		gpa -= __LC_FPREGS_SAVE_AREA;
48669abc2a08SDavid Hildenbrand 
48679abc2a08SDavid Hildenbrand 	/* manually convert vector registers if necessary */
48689abc2a08SDavid Hildenbrand 	if (MACHINE_HAS_VX) {
48699522b37fSDavid Hildenbrand 		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
4870d9a3a09aSMartin Schwidefsky 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
48719abc2a08SDavid Hildenbrand 				     fprs, 128);
48729abc2a08SDavid Hildenbrand 	} else {
48739abc2a08SDavid Hildenbrand 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
48746fd8e67dSDavid Hildenbrand 				     vcpu->run->s.regs.fprs, 128);
48759abc2a08SDavid Hildenbrand 	}
4876d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
4877d0bce605SHeiko Carstens 			      vcpu->run->s.regs.gprs, 128);
4878d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
4879d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gpsw, 16);
4880d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
4881fda902cbSMichael Mueller 			      &px, 4);
4882d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
48839abc2a08SDavid Hildenbrand 			      &vcpu->run->s.regs.fpc, 4);
4884d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
4885d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->todpr, 4);
48864287f247SDavid Hildenbrand 	cputm = kvm_s390_get_cpu_timer(vcpu);
4887d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
48884287f247SDavid Hildenbrand 			      &cputm, 8);
4889178bd789SThomas Huth 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
4890d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
4891d0bce605SHeiko Carstens 			      &clkcomp, 8);
4892d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
4893d0bce605SHeiko Carstens 			      &vcpu->run->s.regs.acrs, 64);
4894d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
4895d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gcr, 128);
4896d0bce605SHeiko Carstens 	return rc ? -EFAULT : 0;
4897b0c632dbSHeiko Carstens }
4898b0c632dbSHeiko Carstens 
4899e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
4900e879892cSThomas Huth {
4901e879892cSThomas Huth 	/*
4902e879892cSThomas Huth 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
490331d8b8d4SChristian Borntraeger 	 * switch in the run ioctl. Let's update our copies before we save
4904e879892cSThomas Huth 	 * them into the save area.
4905e879892cSThomas Huth 	 */
4906d0164ee2SHendrik Brueckner 	save_fpu_regs();
49079abc2a08SDavid Hildenbrand 	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
4908e879892cSThomas Huth 	save_access_regs(vcpu->run->s.regs.acrs);
4909e879892cSThomas Huth 
4910e879892cSThomas Huth 	return kvm_s390_store_status_unloaded(vcpu, addr);
4911e879892cSThomas Huth }
4912e879892cSThomas Huth 
49138ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
49148ad35755SDavid Hildenbrand {
49158ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
49168e236546SChristian Borntraeger 	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
49178ad35755SDavid Hildenbrand }
49188ad35755SDavid Hildenbrand 
49198ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
49208ad35755SDavid Hildenbrand {
492146808a4cSMarc Zyngier 	unsigned long i;
49228ad35755SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
49238ad35755SDavid Hildenbrand 
49248ad35755SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
49258ad35755SDavid Hildenbrand 		__disable_ibs_on_vcpu(vcpu);
49268ad35755SDavid Hildenbrand 	}
49278ad35755SDavid Hildenbrand }
49288ad35755SDavid Hildenbrand 
49298ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
49308ad35755SDavid Hildenbrand {
493109a400e7SDavid Hildenbrand 	if (!sclp.has_ibs)
493209a400e7SDavid Hildenbrand 		return;
49338ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
49348e236546SChristian Borntraeger 	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
49358ad35755SDavid Hildenbrand }
49368ad35755SDavid Hildenbrand 
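/*
 * Starting and stopping vcpus also manages the IBS facility: as long as
 * only one vcpu is running, IBS is enabled on it as an optimization; once
 * a second vcpu starts, IBS is disabled on all vcpus. For protected vcpus
 * the ultravisor is informed of the state change first.
 */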
4937fe28c786SJanosch Frank int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
49386852d7b6SDavid Hildenbrand {
4939fe28c786SJanosch Frank 	int i, online_vcpus, r = 0, started_vcpus = 0;
49408ad35755SDavid Hildenbrand 
49418ad35755SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
4942fe28c786SJanosch Frank 		return 0;
49438ad35755SDavid Hildenbrand 
49446852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
49458ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
4946433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
49478ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
49488ad35755SDavid Hildenbrand 
4949fe28c786SJanosch Frank 	/* Let's tell the UV that we want to change into the operating state */
4950fe28c786SJanosch Frank 	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4951fe28c786SJanosch Frank 		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
4952fe28c786SJanosch Frank 		if (r) {
4953fe28c786SJanosch Frank 			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
4954fe28c786SJanosch Frank 			return r;
4955fe28c786SJanosch Frank 		}
4956fe28c786SJanosch Frank 	}
4957fe28c786SJanosch Frank 
49588ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
4959113d10bcSMarc Zyngier 		if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i)))
49608ad35755SDavid Hildenbrand 			started_vcpus++;
49618ad35755SDavid Hildenbrand 	}
49628ad35755SDavid Hildenbrand 
49638ad35755SDavid Hildenbrand 	if (started_vcpus == 0) {
49648ad35755SDavid Hildenbrand 		/* we're the only active VCPU -> speed it up */
49658ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(vcpu);
49668ad35755SDavid Hildenbrand 	} else if (started_vcpus == 1) {
49678ad35755SDavid Hildenbrand 		/*
49688ad35755SDavid Hildenbrand 		 * As we are starting a second VCPU, we have to disable
49698ad35755SDavid Hildenbrand 		 * the IBS facility on all VCPUs to remove potentially
497038860756SBhaskar Chowdhury 		 * outstanding ENABLE requests.
49718ad35755SDavid Hildenbrand 		 */
49728ad35755SDavid Hildenbrand 		__disable_ibs_on_all_vcpus(vcpu->kvm);
49738ad35755SDavid Hildenbrand 	}
49748ad35755SDavid Hildenbrand 
49759daecfc6SDavid Hildenbrand 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
49768ad35755SDavid Hildenbrand 	/*
497772f21820SChristian Borntraeger 	 * The real PSW might have changed due to a RESTART interpreted by the
497872f21820SChristian Borntraeger 	 * ultravisor. We block all interrupts and let the next sie exit
497972f21820SChristian Borntraeger 	 * refresh our view.
498072f21820SChristian Borntraeger 	 */
498172f21820SChristian Borntraeger 	if (kvm_s390_pv_cpu_is_protected(vcpu))
498272f21820SChristian Borntraeger 		vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
498372f21820SChristian Borntraeger 	/*
49848ad35755SDavid Hildenbrand 	 * Another VCPU might have used IBS while we were offline.
49858ad35755SDavid Hildenbrand 	 * Let's play safe and flush the VCPU at startup.
49868ad35755SDavid Hildenbrand 	 */
4987d3d692c8SDavid Hildenbrand 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4988433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
4989fe28c786SJanosch Frank 	return 0;
49906852d7b6SDavid Hildenbrand }
49916852d7b6SDavid Hildenbrand 
4992fe28c786SJanosch Frank int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
49936852d7b6SDavid Hildenbrand {
4994fe28c786SJanosch Frank 	int i, online_vcpus, r = 0, started_vcpus = 0;
49958ad35755SDavid Hildenbrand 	struct kvm_vcpu *started_vcpu = NULL;
49968ad35755SDavid Hildenbrand 
49978ad35755SDavid Hildenbrand 	if (is_vcpu_stopped(vcpu))
4998fe28c786SJanosch Frank 		return 0;
49998ad35755SDavid Hildenbrand 
50006852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
50018ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
5002433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
50038ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
50048ad35755SDavid Hildenbrand 
5005fe28c786SJanosch Frank 	/* Let's tell the UV that we want to change into the stopped state */
5006fe28c786SJanosch Frank 	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5007fe28c786SJanosch Frank 		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
5008fe28c786SJanosch Frank 		if (r) {
5009fe28c786SJanosch Frank 			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5010fe28c786SJanosch Frank 			return r;
5011fe28c786SJanosch Frank 		}
5012fe28c786SJanosch Frank 	}
5013fe28c786SJanosch Frank 
5014812de046SEric Farman 	/*
5015812de046SEric Farman 	 * Set the VCPU to STOPPED and THEN clear the interrupt flag,
5016812de046SEric Farman 	 * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders
5017812de046SEric Farman 	 * have been fully processed. This will ensure that the VCPU
5018812de046SEric Farman 	 * is kept BUSY if another VCPU is inquiring with SIGP SENSE.
5019812de046SEric Farman 	 */
5020812de046SEric Farman 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
50216cddd432SDavid Hildenbrand 	kvm_s390_clear_stop_irq(vcpu);
502232f5ff63SDavid Hildenbrand 
50238ad35755SDavid Hildenbrand 	__disable_ibs_on_vcpu(vcpu);
50248ad35755SDavid Hildenbrand 
50258ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
5026113d10bcSMarc Zyngier 		struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i);
5027113d10bcSMarc Zyngier 
5028113d10bcSMarc Zyngier 		if (!is_vcpu_stopped(tmp)) {
50298ad35755SDavid Hildenbrand 			started_vcpus++;
5030113d10bcSMarc Zyngier 			started_vcpu = tmp;
50318ad35755SDavid Hildenbrand 		}
50328ad35755SDavid Hildenbrand 	}
50338ad35755SDavid Hildenbrand 
50348ad35755SDavid Hildenbrand 	if (started_vcpus == 1) {
50358ad35755SDavid Hildenbrand 		/*
50368ad35755SDavid Hildenbrand 		 * As we only have one VCPU left, we want to enable the
50378ad35755SDavid Hildenbrand 		 * IBS facility for that VCPU to speed it up.
50388ad35755SDavid Hildenbrand 		 */
50398ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(started_vcpu);
50408ad35755SDavid Hildenbrand 	}
50418ad35755SDavid Hildenbrand 
5042433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5043fe28c786SJanosch Frank 	return 0;
50446852d7b6SDavid Hildenbrand }
50456852d7b6SDavid Hildenbrand 
5046d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
5047d6712df9SCornelia Huck 				     struct kvm_enable_cap *cap)
5048d6712df9SCornelia Huck {
5049d6712df9SCornelia Huck 	int r;
5050d6712df9SCornelia Huck 
5051d6712df9SCornelia Huck 	if (cap->flags)
5052d6712df9SCornelia Huck 		return -EINVAL;
5053d6712df9SCornelia Huck 
5054d6712df9SCornelia Huck 	switch (cap->cap) {
5055fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
5056fa6b7fe9SCornelia Huck 		if (!vcpu->kvm->arch.css_support) {
5057fa6b7fe9SCornelia Huck 			vcpu->kvm->arch.css_support = 1;
5058c92ea7b9SChristian Borntraeger 			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
5059fa6b7fe9SCornelia Huck 			trace_kvm_s390_enable_css(vcpu->kvm);
5060fa6b7fe9SCornelia Huck 		}
5061fa6b7fe9SCornelia Huck 		r = 0;
5062fa6b7fe9SCornelia Huck 		break;
5063d6712df9SCornelia Huck 	default:
5064d6712df9SCornelia Huck 		r = -EINVAL;
5065d6712df9SCornelia Huck 		break;
5066d6712df9SCornelia Huck 	}
5067d6712df9SCornelia Huck 	return r;
5068d6712df9SCornelia Huck }
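
/*
 * A minimal userspace sketch of reaching the handler above through the
 * generic KVM_ENABLE_CAP vcpu ioctl; "vcpu_fd" is an assumed open vcpu file
 * descriptor and is not part of this file:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_S390_CSS_SUPPORT,
 *	};
 *	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap))
 *		perror("KVM_ENABLE_CAP");
 */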
5069d6712df9SCornelia Huck 
50700e1234c0SJanis Schoetterl-Glausch static long kvm_s390_vcpu_sida_op(struct kvm_vcpu *vcpu,
507119e12277SJanosch Frank 				  struct kvm_s390_mem_op *mop)
507219e12277SJanosch Frank {
507319e12277SJanosch Frank 	void __user *uaddr = (void __user *)mop->buf;
507419e12277SJanosch Frank 	int r = 0;
507519e12277SJanosch Frank 
507619e12277SJanosch Frank 	if (mop->flags || !mop->size)
507719e12277SJanosch Frank 		return -EINVAL;
507819e12277SJanosch Frank 	if (mop->size + mop->sida_offset < mop->size)
507919e12277SJanosch Frank 		return -EINVAL;
508019e12277SJanosch Frank 	if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
508119e12277SJanosch Frank 		return -E2BIG;
50822c212e1bSJanis Schoetterl-Glausch 	if (!kvm_s390_pv_cpu_is_protected(vcpu))
50832c212e1bSJanis Schoetterl-Glausch 		return -EINVAL;
508419e12277SJanosch Frank 
508519e12277SJanosch Frank 	switch (mop->op) {
508619e12277SJanosch Frank 	case KVM_S390_MEMOP_SIDA_READ:
508719e12277SJanosch Frank 		if (copy_to_user(uaddr, (void *)(sida_origin(vcpu->arch.sie_block) +
508819e12277SJanosch Frank 				 mop->sida_offset), mop->size))
508919e12277SJanosch Frank 			r = -EFAULT;
509019e12277SJanosch Frank 
509119e12277SJanosch Frank 		break;
509219e12277SJanosch Frank 	case KVM_S390_MEMOP_SIDA_WRITE:
509319e12277SJanosch Frank 		if (copy_from_user((void *)(sida_origin(vcpu->arch.sie_block) +
509419e12277SJanosch Frank 				   mop->sida_offset), uaddr, mop->size))
509519e12277SJanosch Frank 			r = -EFAULT;
509619e12277SJanosch Frank 		break;
509719e12277SJanosch Frank 	}
509819e12277SJanosch Frank 	return r;
509919e12277SJanosch Frank }
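
/*
 * A sketch of the corresponding userspace call for a protected guest,
 * assuming "vcpu_fd" and a sufficiently large buffer "buf" (both assumptions,
 * not part of this file); the SIDA variants address the satellite block by
 * offset rather than by guest address:
 *
 *	struct kvm_s390_mem_op ksmo = {
 *		.op          = KVM_S390_MEMOP_SIDA_READ,
 *		.sida_offset = 0,
 *		.size        = 256,
 *		.buf         = (__u64)(uintptr_t)buf,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_MEM_OP, &ksmo);
 */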
51000e1234c0SJanis Schoetterl-Glausch 
51010e1234c0SJanis Schoetterl-Glausch static long kvm_s390_vcpu_mem_op(struct kvm_vcpu *vcpu,
510241408c28SThomas Huth 				 struct kvm_s390_mem_op *mop)
510341408c28SThomas Huth {
510441408c28SThomas Huth 	void __user *uaddr = (void __user *)mop->buf;
510541408c28SThomas Huth 	void *tmpbuf = NULL;
510619e12277SJanosch Frank 	int r = 0;
510741408c28SThomas Huth 	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
5108e9e9feebSJanis Schoetterl-Glausch 				    | KVM_S390_MEMOP_F_CHECK_ONLY
5109e9e9feebSJanis Schoetterl-Glausch 				    | KVM_S390_MEMOP_F_SKEY_PROTECTION;
511041408c28SThomas Huth 
5111a13b03bbSThomas Huth 	if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
511241408c28SThomas Huth 		return -EINVAL;
511341408c28SThomas Huth 	if (mop->size > MEM_OP_MAX_SIZE)
511441408c28SThomas Huth 		return -E2BIG;
511519e12277SJanosch Frank 	if (kvm_s390_pv_cpu_is_protected(vcpu))
511619e12277SJanosch Frank 		return -EINVAL;
5117e9e9feebSJanis Schoetterl-Glausch 	if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) {
5118e9e9feebSJanis Schoetterl-Glausch 		if (access_key_invalid(mop->key))
5119e9e9feebSJanis Schoetterl-Glausch 			return -EINVAL;
5120e9e9feebSJanis Schoetterl-Glausch 	} else {
5121e9e9feebSJanis Schoetterl-Glausch 		mop->key = 0;
5122e9e9feebSJanis Schoetterl-Glausch 	}
512341408c28SThomas Huth 	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
512441408c28SThomas Huth 		tmpbuf = vmalloc(mop->size);
512541408c28SThomas Huth 		if (!tmpbuf)
512641408c28SThomas Huth 			return -ENOMEM;
512741408c28SThomas Huth 	}
512841408c28SThomas Huth 
512941408c28SThomas Huth 	switch (mop->op) {
513041408c28SThomas Huth 	case KVM_S390_MEMOP_LOGICAL_READ:
513141408c28SThomas Huth 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
5132e9e9feebSJanis Schoetterl-Glausch 			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size,
5133e9e9feebSJanis Schoetterl-Glausch 					    GACC_FETCH, mop->key);
513441408c28SThomas Huth 			break;
513541408c28SThomas Huth 		}
5136e9e9feebSJanis Schoetterl-Glausch 		r = read_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
5137e9e9feebSJanis Schoetterl-Glausch 					mop->size, mop->key);
513841408c28SThomas Huth 		if (r == 0) {
513941408c28SThomas Huth 			if (copy_to_user(uaddr, tmpbuf, mop->size))
514041408c28SThomas Huth 				r = -EFAULT;
514141408c28SThomas Huth 		}
514241408c28SThomas Huth 		break;
514341408c28SThomas Huth 	case KVM_S390_MEMOP_LOGICAL_WRITE:
514441408c28SThomas Huth 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
5145e9e9feebSJanis Schoetterl-Glausch 			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size,
5146e9e9feebSJanis Schoetterl-Glausch 					    GACC_STORE, mop->key);
514741408c28SThomas Huth 			break;
514841408c28SThomas Huth 		}
514941408c28SThomas Huth 		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
515041408c28SThomas Huth 			r = -EFAULT;
515141408c28SThomas Huth 			break;
515241408c28SThomas Huth 		}
5153e9e9feebSJanis Schoetterl-Glausch 		r = write_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
5154e9e9feebSJanis Schoetterl-Glausch 					 mop->size, mop->key);
515541408c28SThomas Huth 		break;
515641408c28SThomas Huth 	}
515741408c28SThomas Huth 
515841408c28SThomas Huth 	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
515941408c28SThomas Huth 		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
516041408c28SThomas Huth 
516141408c28SThomas Huth 	vfree(tmpbuf);
516241408c28SThomas Huth 	return r;
516341408c28SThomas Huth }
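
/*
 * A minimal userspace sketch of driving this path via the KVM_S390_MEM_OP
 * vcpu ioctl; "vcpu_fd", the guest address and the buffer are assumptions,
 * not part of this file:
 *
 *	char buf[512];
 *	struct kvm_s390_mem_op ksmo = {
 *		.gaddr = 0x1000,
 *		.size  = sizeof(buf),
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)(uintptr_t)buf,
 *		.ar    = 0,
 *	};
 *	if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &ksmo))
 *		perror("KVM_S390_MEM_OP");
 */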
516441408c28SThomas Huth 
51650e1234c0SJanis Schoetterl-Glausch static long kvm_s390_vcpu_memsida_op(struct kvm_vcpu *vcpu,
516619e12277SJanosch Frank 				     struct kvm_s390_mem_op *mop)
516719e12277SJanosch Frank {
516819e12277SJanosch Frank 	int r, srcu_idx;
516919e12277SJanosch Frank 
517019e12277SJanosch Frank 	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
517119e12277SJanosch Frank 
517219e12277SJanosch Frank 	switch (mop->op) {
517319e12277SJanosch Frank 	case KVM_S390_MEMOP_LOGICAL_READ:
517419e12277SJanosch Frank 	case KVM_S390_MEMOP_LOGICAL_WRITE:
51750e1234c0SJanis Schoetterl-Glausch 		r = kvm_s390_vcpu_mem_op(vcpu, mop);
517619e12277SJanosch Frank 		break;
517719e12277SJanosch Frank 	case KVM_S390_MEMOP_SIDA_READ:
517819e12277SJanosch Frank 	case KVM_S390_MEMOP_SIDA_WRITE:
517919e12277SJanosch Frank 		/* the vcpu->mutex keeps the sida from going away under us */
51800e1234c0SJanis Schoetterl-Glausch 		r = kvm_s390_vcpu_sida_op(vcpu, mop);
518119e12277SJanosch Frank 		break;
518219e12277SJanosch Frank 	default:
518319e12277SJanosch Frank 		r = -EINVAL;
518419e12277SJanosch Frank 	}
518519e12277SJanosch Frank 
518619e12277SJanosch Frank 	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
518719e12277SJanosch Frank 	return r;
518819e12277SJanosch Frank }
518919e12277SJanosch Frank 
51905cb0944cSPaolo Bonzini long kvm_arch_vcpu_async_ioctl(struct file *filp,
5191b0c632dbSHeiko Carstens 			       unsigned int ioctl, unsigned long arg)
5192b0c632dbSHeiko Carstens {
5193b0c632dbSHeiko Carstens 	struct kvm_vcpu *vcpu = filp->private_data;
5194b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
5195b0c632dbSHeiko Carstens 
519693736624SAvi Kivity 	switch (ioctl) {
519747b43c52SJens Freimann 	case KVM_S390_IRQ: {
519847b43c52SJens Freimann 		struct kvm_s390_irq s390irq;
519947b43c52SJens Freimann 
520047b43c52SJens Freimann 		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
52019b062471SChristoffer Dall 			return -EFAULT;
52029b062471SChristoffer Dall 		return kvm_s390_inject_vcpu(vcpu, &s390irq);
520347b43c52SJens Freimann 	}
520493736624SAvi Kivity 	case KVM_S390_INTERRUPT: {
5205ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
520653936b5bSThomas Huth 		struct kvm_s390_irq s390irq = {};
5207ba5c1e9bSCarsten Otte 
5208ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
52099b062471SChristoffer Dall 			return -EFAULT;
5210383d0b05SJens Freimann 		if (s390int_to_s390irq(&s390int, &s390irq))
5211383d0b05SJens Freimann 			return -EINVAL;
52129b062471SChristoffer Dall 		return kvm_s390_inject_vcpu(vcpu, &s390irq);
5213ba5c1e9bSCarsten Otte 	}
52149b062471SChristoffer Dall 	}
52155cb0944cSPaolo Bonzini 	return -ENOIOCTLCMD;
52165cb0944cSPaolo Bonzini }
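
/*
 * A userspace sketch of the KVM_S390_IRQ fast path handled above, assuming
 * "vcpu_fd" (not part of this file); here an emergency signal from CPU
 * address 1 is injected:
 *
 *	struct kvm_s390_irq irq = {
 *		.type         = KVM_S390_INT_EMERGENCY,
 *		.u.emerg.code = 1,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_IRQ, &irq);
 */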
52175cb0944cSPaolo Bonzini 
52188aba0958SJanosch Frank static int kvm_s390_handle_pv_vcpu_dump(struct kvm_vcpu *vcpu,
52198aba0958SJanosch Frank 					struct kvm_pv_cmd *cmd)
52208aba0958SJanosch Frank {
52218aba0958SJanosch Frank 	struct kvm_s390_pv_dmp dmp;
52228aba0958SJanosch Frank 	void *data;
52238aba0958SJanosch Frank 	int ret;
52248aba0958SJanosch Frank 
52258aba0958SJanosch Frank 	/* Dump initialization is a prerequisite */
52268aba0958SJanosch Frank 	if (!vcpu->kvm->arch.pv.dumping)
52278aba0958SJanosch Frank 		return -EINVAL;
52288aba0958SJanosch Frank 
52298aba0958SJanosch Frank 	if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp)))
52308aba0958SJanosch Frank 		return -EFAULT;
52318aba0958SJanosch Frank 
52328aba0958SJanosch Frank 	/* We only handle this subcmd right now */
52338aba0958SJanosch Frank 	if (dmp.subcmd != KVM_PV_DUMP_CPU)
52348aba0958SJanosch Frank 		return -EINVAL;
52358aba0958SJanosch Frank 
52368aba0958SJanosch Frank 	/* The CPU dump length must match the cpu storage donated at CPU creation. */
52378aba0958SJanosch Frank 	if (dmp.buff_len != uv_info.guest_cpu_stor_len)
52388aba0958SJanosch Frank 		return -EINVAL;
52398aba0958SJanosch Frank 
52408aba0958SJanosch Frank 	data = kvzalloc(uv_info.guest_cpu_stor_len, GFP_KERNEL);
52418aba0958SJanosch Frank 	if (!data)
52428aba0958SJanosch Frank 		return -ENOMEM;
52438aba0958SJanosch Frank 
52448aba0958SJanosch Frank 	ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc);
52458aba0958SJanosch Frank 
52468aba0958SJanosch Frank 	VCPU_EVENT(vcpu, 3, "PROTVIRT DUMP CPU %d rc %x rrc %x",
52478aba0958SJanosch Frank 		   vcpu->vcpu_id, cmd->rc, cmd->rrc);
52488aba0958SJanosch Frank 
52498aba0958SJanosch Frank 	if (ret)
52508aba0958SJanosch Frank 		ret = -EINVAL;
52518aba0958SJanosch Frank 
52528aba0958SJanosch Frank 	/* On success copy over the dump data */
52538aba0958SJanosch Frank 	if (!ret && copy_to_user((__u8 __user *)dmp.buff_addr, data, uv_info.guest_cpu_stor_len))
52548aba0958SJanosch Frank 		ret = -EFAULT;
52558aba0958SJanosch Frank 
52568aba0958SJanosch Frank 	kvfree(data);
52578aba0958SJanosch Frank 	return ret;
52588aba0958SJanosch Frank }
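
/*
 * A userspace sketch of requesting such a CPU dump, assuming "vcpu_fd" and a
 * buffer "buf" whose length "cpu_stor_len" matches the CPU storage donation
 * reported by the ultravisor (all assumptions, not part of this file):
 *
 *	struct kvm_s390_pv_dmp dmp = {
 *		.subcmd    = KVM_PV_DUMP_CPU,
 *		.buff_addr = (__u64)(uintptr_t)buf,
 *		.buff_len  = cpu_stor_len,
 *	};
 *	struct kvm_pv_cmd cmd = {
 *		.cmd  = KVM_PV_DUMP,
 *		.data = (__u64)(uintptr_t)&dmp,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_PV_CPU_COMMAND, &cmd);
 */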
52598aba0958SJanosch Frank 
52605cb0944cSPaolo Bonzini long kvm_arch_vcpu_ioctl(struct file *filp,
52615cb0944cSPaolo Bonzini 			 unsigned int ioctl, unsigned long arg)
52625cb0944cSPaolo Bonzini {
52635cb0944cSPaolo Bonzini 	struct kvm_vcpu *vcpu = filp->private_data;
52645cb0944cSPaolo Bonzini 	void __user *argp = (void __user *)arg;
52655cb0944cSPaolo Bonzini 	int idx;
52665cb0944cSPaolo Bonzini 	long r;
52678a8378faSJanosch Frank 	u16 rc, rrc;
52689b062471SChristoffer Dall 
52699b062471SChristoffer Dall 	vcpu_load(vcpu);
52709b062471SChristoffer Dall 
52719b062471SChristoffer Dall 	switch (ioctl) {
5272b0c632dbSHeiko Carstens 	case KVM_S390_STORE_STATUS:
5273800c1065SThomas Huth 		idx = srcu_read_lock(&vcpu->kvm->srcu);
527455680890SChristian Borntraeger 		r = kvm_s390_store_status_unloaded(vcpu, arg);
5275800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
5276bc923cc9SAvi Kivity 		break;
5277b0c632dbSHeiko Carstens 	case KVM_S390_SET_INITIAL_PSW: {
5278b0c632dbSHeiko Carstens 		psw_t psw;
5279b0c632dbSHeiko Carstens 
5280bc923cc9SAvi Kivity 		r = -EFAULT;
5281b0c632dbSHeiko Carstens 		if (copy_from_user(&psw, argp, sizeof(psw)))
5282bc923cc9SAvi Kivity 			break;
5283bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
5284bc923cc9SAvi Kivity 		break;
5285b0c632dbSHeiko Carstens 	}
52867de3f142SJanosch Frank 	case KVM_S390_CLEAR_RESET:
52877de3f142SJanosch Frank 		r = 0;
52887de3f142SJanosch Frank 		kvm_arch_vcpu_ioctl_clear_reset(vcpu);
52898a8378faSJanosch Frank 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
52908a8378faSJanosch Frank 			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
52918a8378faSJanosch Frank 					  UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
52928a8378faSJanosch Frank 			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
52938a8378faSJanosch Frank 				   rc, rrc);
52948a8378faSJanosch Frank 		}
52957de3f142SJanosch Frank 		break;
5296b0c632dbSHeiko Carstens 	case KVM_S390_INITIAL_RESET:
52977de3f142SJanosch Frank 		r = 0;
52987de3f142SJanosch Frank 		kvm_arch_vcpu_ioctl_initial_reset(vcpu);
52998a8378faSJanosch Frank 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
53008a8378faSJanosch Frank 			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
53018a8378faSJanosch Frank 					  UVC_CMD_CPU_RESET_INITIAL,
53028a8378faSJanosch Frank 					  &rc, &rrc);
53038a8378faSJanosch Frank 			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
53048a8378faSJanosch Frank 				   rc, rrc);
53058a8378faSJanosch Frank 		}
53067de3f142SJanosch Frank 		break;
53077de3f142SJanosch Frank 	case KVM_S390_NORMAL_RESET:
53087de3f142SJanosch Frank 		r = 0;
53097de3f142SJanosch Frank 		kvm_arch_vcpu_ioctl_normal_reset(vcpu);
53108a8378faSJanosch Frank 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
53118a8378faSJanosch Frank 			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
53128a8378faSJanosch Frank 					  UVC_CMD_CPU_RESET, &rc, &rrc);
53138a8378faSJanosch Frank 			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
53148a8378faSJanosch Frank 				   rc, rrc);
53158a8378faSJanosch Frank 		}
5316bc923cc9SAvi Kivity 		break;
531714eebd91SCarsten Otte 	case KVM_SET_ONE_REG:
531814eebd91SCarsten Otte 	case KVM_GET_ONE_REG: {
531914eebd91SCarsten Otte 		struct kvm_one_reg reg;
532068cf7b1fSJanosch Frank 		r = -EINVAL;
532168cf7b1fSJanosch Frank 		if (kvm_s390_pv_cpu_is_protected(vcpu))
532268cf7b1fSJanosch Frank 			break;
532314eebd91SCarsten Otte 		r = -EFAULT;
532414eebd91SCarsten Otte 		if (copy_from_user(&reg, argp, sizeof(reg)))
532514eebd91SCarsten Otte 			break;
532614eebd91SCarsten Otte 		if (ioctl == KVM_SET_ONE_REG)
532714eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
532814eebd91SCarsten Otte 		else
532914eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
533014eebd91SCarsten Otte 		break;
533114eebd91SCarsten Otte 	}
533227e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
533327e0393fSCarsten Otte 	case KVM_S390_UCAS_MAP: {
533427e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
533527e0393fSCarsten Otte 
533627e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
533727e0393fSCarsten Otte 			r = -EFAULT;
533827e0393fSCarsten Otte 			break;
533927e0393fSCarsten Otte 		}
534027e0393fSCarsten Otte 
534127e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
534227e0393fSCarsten Otte 			r = -EINVAL;
534327e0393fSCarsten Otte 			break;
534427e0393fSCarsten Otte 		}
534527e0393fSCarsten Otte 
534627e0393fSCarsten Otte 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
534727e0393fSCarsten Otte 				     ucasmap.vcpu_addr, ucasmap.length);
534827e0393fSCarsten Otte 		break;
534927e0393fSCarsten Otte 	}
535027e0393fSCarsten Otte 	case KVM_S390_UCAS_UNMAP: {
535127e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
535227e0393fSCarsten Otte 
535327e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
535427e0393fSCarsten Otte 			r = -EFAULT;
535527e0393fSCarsten Otte 			break;
535627e0393fSCarsten Otte 		}
535727e0393fSCarsten Otte 
535827e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
535927e0393fSCarsten Otte 			r = -EINVAL;
536027e0393fSCarsten Otte 			break;
536127e0393fSCarsten Otte 		}
536227e0393fSCarsten Otte 
536327e0393fSCarsten Otte 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
536427e0393fSCarsten Otte 			ucasmap.length);
536527e0393fSCarsten Otte 		break;
536627e0393fSCarsten Otte 	}
536727e0393fSCarsten Otte #endif
5368ccc7910fSCarsten Otte 	case KVM_S390_VCPU_FAULT: {
5369527e30b4SMartin Schwidefsky 		r = gmap_fault(vcpu->arch.gmap, arg, 0);
5370ccc7910fSCarsten Otte 		break;
5371ccc7910fSCarsten Otte 	}
5372d6712df9SCornelia Huck 	case KVM_ENABLE_CAP:
5373d6712df9SCornelia Huck 	{
5374d6712df9SCornelia Huck 		struct kvm_enable_cap cap;
5375d6712df9SCornelia Huck 		r = -EFAULT;
5376d6712df9SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
5377d6712df9SCornelia Huck 			break;
5378d6712df9SCornelia Huck 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
5379d6712df9SCornelia Huck 		break;
5380d6712df9SCornelia Huck 	}
538141408c28SThomas Huth 	case KVM_S390_MEM_OP: {
538241408c28SThomas Huth 		struct kvm_s390_mem_op mem_op;
538341408c28SThomas Huth 
538441408c28SThomas Huth 		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
53850e1234c0SJanis Schoetterl-Glausch 			r = kvm_s390_vcpu_memsida_op(vcpu, &mem_op);
538641408c28SThomas Huth 		else
538741408c28SThomas Huth 			r = -EFAULT;
538841408c28SThomas Huth 		break;
538941408c28SThomas Huth 	}
5390816c7667SJens Freimann 	case KVM_S390_SET_IRQ_STATE: {
5391816c7667SJens Freimann 		struct kvm_s390_irq_state irq_state;
5392816c7667SJens Freimann 
5393816c7667SJens Freimann 		r = -EFAULT;
5394816c7667SJens Freimann 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
5395816c7667SJens Freimann 			break;
5396816c7667SJens Freimann 		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
5397816c7667SJens Freimann 		    irq_state.len == 0 ||
5398816c7667SJens Freimann 		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
5399816c7667SJens Freimann 			r = -EINVAL;
5400816c7667SJens Freimann 			break;
5401816c7667SJens Freimann 		}
5402bb64da9aSChristian Borntraeger 		/* do not use irq_state.flags, it will break old QEMUs */
5403816c7667SJens Freimann 		r = kvm_s390_set_irq_state(vcpu,
5404816c7667SJens Freimann 					   (void __user *) irq_state.buf,
5405816c7667SJens Freimann 					   irq_state.len);
5406816c7667SJens Freimann 		break;
5407816c7667SJens Freimann 	}
5408816c7667SJens Freimann 	case KVM_S390_GET_IRQ_STATE: {
5409816c7667SJens Freimann 		struct kvm_s390_irq_state irq_state;
5410816c7667SJens Freimann 
5411816c7667SJens Freimann 		r = -EFAULT;
5412816c7667SJens Freimann 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
5413816c7667SJens Freimann 			break;
5414816c7667SJens Freimann 		if (irq_state.len == 0) {
5415816c7667SJens Freimann 			r = -EINVAL;
5416816c7667SJens Freimann 			break;
5417816c7667SJens Freimann 		}
5418bb64da9aSChristian Borntraeger 		/* do not use irq_state.flags, it will break old QEMUs */
5419816c7667SJens Freimann 		r = kvm_s390_get_irq_state(vcpu,
5420816c7667SJens Freimann 					   (__u8 __user *)  irq_state.buf,
5421816c7667SJens Freimann 					   irq_state.len);
5422816c7667SJens Freimann 		break;
5423816c7667SJens Freimann 	}
54248aba0958SJanosch Frank 	case KVM_S390_PV_CPU_COMMAND: {
54258aba0958SJanosch Frank 		struct kvm_pv_cmd cmd;
54268aba0958SJanosch Frank 
54278aba0958SJanosch Frank 		r = -EINVAL;
54288aba0958SJanosch Frank 		if (!is_prot_virt_host())
54298aba0958SJanosch Frank 			break;
54308aba0958SJanosch Frank 
54318aba0958SJanosch Frank 		r = -EFAULT;
54328aba0958SJanosch Frank 		if (copy_from_user(&cmd, argp, sizeof(cmd)))
54338aba0958SJanosch Frank 			break;
54348aba0958SJanosch Frank 
54358aba0958SJanosch Frank 		r = -EINVAL;
54368aba0958SJanosch Frank 		if (cmd.flags)
54378aba0958SJanosch Frank 			break;
54388aba0958SJanosch Frank 
54398aba0958SJanosch Frank 		/* We only handle this cmd right now */
54408aba0958SJanosch Frank 		if (cmd.cmd != KVM_PV_DUMP)
54418aba0958SJanosch Frank 			break;
54428aba0958SJanosch Frank 
54438aba0958SJanosch Frank 		r = kvm_s390_handle_pv_vcpu_dump(vcpu, &cmd);
54448aba0958SJanosch Frank 
54458aba0958SJanosch Frank 		/* Always copy over UV rc / rrc data */
54468aba0958SJanosch Frank 		if (copy_to_user((__u8 __user *)argp, &cmd.rc,
54478aba0958SJanosch Frank 				 sizeof(cmd.rc) + sizeof(cmd.rrc)))
54488aba0958SJanosch Frank 			r = -EFAULT;
54498aba0958SJanosch Frank 		break;
54508aba0958SJanosch Frank 	}
5451b0c632dbSHeiko Carstens 	default:
54523e6afcf1SCarsten Otte 		r = -ENOTTY;
5453b0c632dbSHeiko Carstens 	}
54549b062471SChristoffer Dall 
54559b062471SChristoffer Dall 	vcpu_put(vcpu);
5456bc923cc9SAvi Kivity 	return r;
5457b0c632dbSHeiko Carstens }
5458b0c632dbSHeiko Carstens 
54591499fa80SSouptick Joarder vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
54605b1c1493SCarsten Otte {
54615b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
54625b1c1493SCarsten Otte 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
54635b1c1493SCarsten Otte 		 && (kvm_is_ucontrol(vcpu->kvm))) {
54645b1c1493SCarsten Otte 		vmf->page = virt_to_page(vcpu->arch.sie_block);
54655b1c1493SCarsten Otte 		get_page(vmf->page);
54665b1c1493SCarsten Otte 		return 0;
54675b1c1493SCarsten Otte 	}
54685b1c1493SCarsten Otte #endif
54695b1c1493SCarsten Otte 	return VM_FAULT_SIGBUS;
54705b1c1493SCarsten Otte }
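
/*
 * For user-controlled (ucontrol) guests, a sketch of how userspace could map
 * the SIE control block through this fault handler, assuming "vcpu_fd" and
 * "page_size" (assumptions, not part of this file):
 *
 *	void *sie_block = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, vcpu_fd,
 *			       KVM_S390_SIE_PAGE_OFFSET * page_size);
 */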
54715b1c1493SCarsten Otte 
5472b0c632dbSHeiko Carstens /* Section: memory related */
5473f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
5474537a17b3SSean Christopherson 				   const struct kvm_memory_slot *old,
5475537a17b3SSean Christopherson 				   struct kvm_memory_slot *new,
54767b6195a9STakuya Yoshikawa 				   enum kvm_mr_change change)
5477b0c632dbSHeiko Carstens {
5478ec5c8697SSean Christopherson 	gpa_t size;
5479ec5c8697SSean Christopherson 
5480ec5c8697SSean Christopherson 	/* When we are protected, we should not change the memory slots */
5481ec5c8697SSean Christopherson 	if (kvm_s390_pv_get_handle(kvm))
5482ec5c8697SSean Christopherson 		return -EINVAL;
5483ec5c8697SSean Christopherson 
5484ec5c8697SSean Christopherson 	if (change == KVM_MR_DELETE || change == KVM_MR_FLAGS_ONLY)
5485ec5c8697SSean Christopherson 		return 0;
5486cf5b4869SSean Christopherson 
5487dd2887e7SNick Wang 	/* A few sanity checks. Memory slots must start and end on a segment
5488dd2887e7SNick Wang 	   boundary (1 MB). The memory backing the slot in userland may be
5489dd2887e7SNick Wang 	   fragmented into various different vmas, and it is fine to mmap()
5490dd2887e7SNick Wang 	   and munmap() within this slot at any time after this call. */
5491b0c632dbSHeiko Carstens 
5492cf5b4869SSean Christopherson 	if (new->userspace_addr & 0xffffful)
5493b0c632dbSHeiko Carstens 		return -EINVAL;
5494b0c632dbSHeiko Carstens 
5495ec5c8697SSean Christopherson 	size = new->npages * PAGE_SIZE;
5496cf5b4869SSean Christopherson 	if (size & 0xffffful)
5497b0c632dbSHeiko Carstens 		return -EINVAL;
5498b0c632dbSHeiko Carstens 
5499cf5b4869SSean Christopherson 	if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
5500a3a92c31SDominik Dingel 		return -EINVAL;
5501a3a92c31SDominik Dingel 
5502f7784b8eSMarcelo Tosatti 	return 0;
5503f7784b8eSMarcelo Tosatti }
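
/*
 * A sketch of a memslot registration that satisfies the checks above,
 * assuming "vm_fd" and a 1 MB aligned mapping "mem" (assumptions, not part
 * of this file); both the userspace address and the size are segment
 * aligned:
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = 256UL << 20,
 *		.userspace_addr  = (__u64)(uintptr_t)mem,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */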
5504f7784b8eSMarcelo Tosatti 
5505f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
55069d4c197cSSean Christopherson 				struct kvm_memory_slot *old,
5507f36f3f28SPaolo Bonzini 				const struct kvm_memory_slot *new,
55088482644aSTakuya Yoshikawa 				enum kvm_mr_change change)
5509f7784b8eSMarcelo Tosatti {
551019ec166cSChristian Borntraeger 	int rc = 0;
5511f7784b8eSMarcelo Tosatti 
551219ec166cSChristian Borntraeger 	switch (change) {
551319ec166cSChristian Borntraeger 	case KVM_MR_DELETE:
551419ec166cSChristian Borntraeger 		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
551519ec166cSChristian Borntraeger 					old->npages * PAGE_SIZE);
551619ec166cSChristian Borntraeger 		break;
551719ec166cSChristian Borntraeger 	case KVM_MR_MOVE:
551819ec166cSChristian Borntraeger 		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
551919ec166cSChristian Borntraeger 					old->npages * PAGE_SIZE);
552019ec166cSChristian Borntraeger 		if (rc)
552119ec166cSChristian Borntraeger 			break;
55223b684a42SJoe Perches 		fallthrough;
552319ec166cSChristian Borntraeger 	case KVM_MR_CREATE:
5524cf5b4869SSean Christopherson 		rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr,
5525cf5b4869SSean Christopherson 				      new->base_gfn * PAGE_SIZE,
5526cf5b4869SSean Christopherson 				      new->npages * PAGE_SIZE);
552719ec166cSChristian Borntraeger 		break;
552819ec166cSChristian Borntraeger 	case KVM_MR_FLAGS_ONLY:
552919ec166cSChristian Borntraeger 		break;
553019ec166cSChristian Borntraeger 	default:
553119ec166cSChristian Borntraeger 		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
553219ec166cSChristian Borntraeger 	}
5533598841caSCarsten Otte 	if (rc)
5534ea2cdd27SDavid Hildenbrand 		pr_warn("failed to commit memory region\n");
5535598841caSCarsten Otte 	return;
5536b0c632dbSHeiko Carstens }
5537b0c632dbSHeiko Carstens 
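/*
 * Extract the i-th 2-bit field (counting from the most significant end) of
 * sclp.hmfai; a field value of n yields a mask with the top 16 * (n + 1)
 * bits of the 64-bit facility doubleword cleared and the remaining bits set.
 * Used below when building kvm_s390_fac_base.
 */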
553860a37709SAlexander Yarygin static inline unsigned long nonhyp_mask(int i)
553960a37709SAlexander Yarygin {
554060a37709SAlexander Yarygin 	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
554160a37709SAlexander Yarygin 
554260a37709SAlexander Yarygin 	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
554360a37709SAlexander Yarygin }
554460a37709SAlexander Yarygin 
5545b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
5546b0c632dbSHeiko Carstens {
554760a37709SAlexander Yarygin 	int i;
554860a37709SAlexander Yarygin 
554907197fd0SDavid Hildenbrand 	if (!sclp.has_sief2) {
55508d43d570SMichael Mueller 		pr_info("SIE is not available\n");
555107197fd0SDavid Hildenbrand 		return -ENODEV;
555207197fd0SDavid Hildenbrand 	}
555307197fd0SDavid Hildenbrand 
5554a4499382SJanosch Frank 	if (nested && hpage) {
55558d43d570SMichael Mueller 		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
5556a4499382SJanosch Frank 		return -EINVAL;
5557a4499382SJanosch Frank 	}
5558a4499382SJanosch Frank 
555960a37709SAlexander Yarygin 	for (i = 0; i < 16; i++)
5560c3b9e3e1SChristian Borntraeger 		kvm_s390_fac_base[i] |=
556117e89e13SSven Schnelle 			stfle_fac_list[i] & nonhyp_mask(i);
556260a37709SAlexander Yarygin 
55639d8d5786SMichael Mueller 	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
5564b0c632dbSHeiko Carstens }
5565b0c632dbSHeiko Carstens 
5566b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void)
5567b0c632dbSHeiko Carstens {
5568b0c632dbSHeiko Carstens 	kvm_exit();
5569b0c632dbSHeiko Carstens }
5570b0c632dbSHeiko Carstens 
5571b0c632dbSHeiko Carstens module_init(kvm_s390_init);
5572b0c632dbSHeiko Carstens module_exit(kvm_s390_exit);
5573566af940SCornelia Huck 
5574566af940SCornelia Huck /*
5575566af940SCornelia Huck  * Enable autoloading of the kvm module.
5576566af940SCornelia Huck  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
5577566af940SCornelia Huck  * since x86 takes a different approach.
5578566af940SCornelia Huck  */
5579566af940SCornelia Huck #include <linux/miscdevice.h>
5580566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR);
5581566af940SCornelia Huck MODULE_ALIAS("devname:kvm");
5582