xref: /openbmc/linux/arch/s390/kvm/kvm-s390.c (revision b801ef42149fe5cba21bb6fe80c6d8e4c031f990)
1d809aa23SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2b0c632dbSHeiko Carstens /*
3bb64da9aSChristian Borntraeger  * hosting IBM Z kernel virtual machines (s390x)
4b0c632dbSHeiko Carstens  *
53e6c5568SJanosch Frank  * Copyright IBM Corp. 2008, 2020
6b0c632dbSHeiko Carstens  *
7b0c632dbSHeiko Carstens  *    Author(s): Carsten Otte <cotte@de.ibm.com>
8b0c632dbSHeiko Carstens  *               Christian Borntraeger <borntraeger@de.ibm.com>
9628eb9b8SChristian Ehrhardt  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
1015f36ebdSJason J. Herne  *               Jason J. Herne <jjherne@us.ibm.com>
11b0c632dbSHeiko Carstens  */
12b0c632dbSHeiko Carstens 
137aedd9d4SMichael Mueller #define KMSG_COMPONENT "kvm-s390"
147aedd9d4SMichael Mueller #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
157aedd9d4SMichael Mueller 
16b0c632dbSHeiko Carstens #include <linux/compiler.h>
17b0c632dbSHeiko Carstens #include <linux/err.h>
18b0c632dbSHeiko Carstens #include <linux/fs.h>
19ca872302SChristian Borntraeger #include <linux/hrtimer.h>
20b0c632dbSHeiko Carstens #include <linux/init.h>
21b0c632dbSHeiko Carstens #include <linux/kvm.h>
22b0c632dbSHeiko Carstens #include <linux/kvm_host.h>
23b2d73b2aSMartin Schwidefsky #include <linux/mman.h>
24b0c632dbSHeiko Carstens #include <linux/module.h>
25d3217967SPaul Gortmaker #include <linux/moduleparam.h>
26a374e892STony Krowiak #include <linux/random.h>
27b0c632dbSHeiko Carstens #include <linux/slab.h>
28ba5c1e9bSCarsten Otte #include <linux/timer.h>
2941408c28SThomas Huth #include <linux/vmalloc.h>
3015c9705fSDavid Hildenbrand #include <linux/bitmap.h>
31174cd4b1SIngo Molnar #include <linux/sched/signal.h>
32190df4a2SClaudio Imbrenda #include <linux/string.h>
3365fddcfcSMike Rapoport #include <linux/pgtable.h>
34ca2fd060SClaudio Imbrenda #include <linux/mmu_notifier.h>
35174cd4b1SIngo Molnar 
36cbb870c8SHeiko Carstens #include <asm/asm-offsets.h>
37b0c632dbSHeiko Carstens #include <asm/lowcore.h>
38fd5ada04SMartin Schwidefsky #include <asm/stp.h>
391e133ab2SMartin Schwidefsky #include <asm/gmap.h>
40f5daba1dSHeiko Carstens #include <asm/nmi.h>
41a0616cdeSDavid Howells #include <asm/switch_to.h>
426d3da241SJens Freimann #include <asm/isc.h>
431526bf9cSChristian Borntraeger #include <asm/sclp.h>
440a763c78SDavid Hildenbrand #include <asm/cpacf.h>
45221bb8a4SLinus Torvalds #include <asm/timex.h>
46e585b24aSTony Krowiak #include <asm/ap.h>
4729b40f10SJanosch Frank #include <asm/uv.h>
4856e62a73SSven Schnelle #include <asm/fpu/api.h>
498f2abe6aSChristian Borntraeger #include "kvm-s390.h"
50b0c632dbSHeiko Carstens #include "gaccess.h"
5198b1d33dSMatthew Rosato #include "pci.h"
52b0c632dbSHeiko Carstens 
535786fffaSCornelia Huck #define CREATE_TRACE_POINTS
545786fffaSCornelia Huck #include "trace.h"
55ade38c31SCornelia Huck #include "trace-s390.h"
565786fffaSCornelia Huck 
#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32		/* vcpu-local interrupts accounted per vcpu */
/* worst case: one irq per possible vcpu plus LOCAL_IRQS vcpu-local ones */
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))
6141408c28SThomas Huth 
/*
 * Descriptors for the VM-wide statistics: the generic KVM stats first,
 * followed by the s390 specific injection/forwarding counters. The entry
 * order determines the data layout seen by userspace, so do not reorder.
 */
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, inject_io),
	STATS_DESC_COUNTER(VM, inject_float_mchk),
	STATS_DESC_COUNTER(VM, inject_pfault_done),
	STATS_DESC_COUNTER(VM, inject_service_signal),
	STATS_DESC_COUNTER(VM, inject_virtio),
	STATS_DESC_COUNTER(VM, aen_forward)
};
71fcfe1baeSJing Zhang 
/*
 * Header for the VM binary stats interface. The id string, descriptor
 * array and stats data are laid out back to back behind the header, as
 * the cumulative offset computations below show.
 */
const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};
80fcfe1baeSJing Zhang 
/*
 * Descriptors for the per-vcpu statistics: generic KVM stats first, then
 * s390 exit/delivery/injection/instruction counters. The entry order
 * determines the data layout seen by userspace, so do not reorder.
 */
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, exit_userspace),
	STATS_DESC_COUNTER(VCPU, exit_null),
	STATS_DESC_COUNTER(VCPU, exit_external_request),
	STATS_DESC_COUNTER(VCPU, exit_io_request),
	STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
	STATS_DESC_COUNTER(VCPU, exit_stop_request),
	STATS_DESC_COUNTER(VCPU, exit_validity),
	STATS_DESC_COUNTER(VCPU, exit_instruction),
	STATS_DESC_COUNTER(VCPU, exit_pei),
	STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
	STATS_DESC_COUNTER(VCPU, instruction_lctl),
	STATS_DESC_COUNTER(VCPU, instruction_lctlg),
	STATS_DESC_COUNTER(VCPU, instruction_stctl),
	STATS_DESC_COUNTER(VCPU, instruction_stctg),
	STATS_DESC_COUNTER(VCPU, exit_program_interruption),
	STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
	STATS_DESC_COUNTER(VCPU, exit_operation_exception),
	STATS_DESC_COUNTER(VCPU, deliver_ckc),
	STATS_DESC_COUNTER(VCPU, deliver_cputm),
	STATS_DESC_COUNTER(VCPU, deliver_external_call),
	STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
	STATS_DESC_COUNTER(VCPU, deliver_service_signal),
	STATS_DESC_COUNTER(VCPU, deliver_virtio),
	STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
	STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
	STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
	STATS_DESC_COUNTER(VCPU, deliver_program),
	STATS_DESC_COUNTER(VCPU, deliver_io),
	STATS_DESC_COUNTER(VCPU, deliver_machine_check),
	STATS_DESC_COUNTER(VCPU, exit_wait_state),
	STATS_DESC_COUNTER(VCPU, inject_ckc),
	STATS_DESC_COUNTER(VCPU, inject_cputm),
	STATS_DESC_COUNTER(VCPU, inject_external_call),
	STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
	STATS_DESC_COUNTER(VCPU, inject_mchk),
	STATS_DESC_COUNTER(VCPU, inject_pfault_init),
	STATS_DESC_COUNTER(VCPU, inject_program),
	STATS_DESC_COUNTER(VCPU, inject_restart),
	STATS_DESC_COUNTER(VCPU, inject_set_prefix),
	STATS_DESC_COUNTER(VCPU, inject_stop_signal),
	STATS_DESC_COUNTER(VCPU, instruction_epsw),
	STATS_DESC_COUNTER(VCPU, instruction_gs),
	STATS_DESC_COUNTER(VCPU, instruction_io_other),
	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
	STATS_DESC_COUNTER(VCPU, instruction_ptff),
	STATS_DESC_COUNTER(VCPU, instruction_sck),
	STATS_DESC_COUNTER(VCPU, instruction_sckpf),
	STATS_DESC_COUNTER(VCPU, instruction_stidp),
	STATS_DESC_COUNTER(VCPU, instruction_spx),
	STATS_DESC_COUNTER(VCPU, instruction_stpx),
	STATS_DESC_COUNTER(VCPU, instruction_stap),
	STATS_DESC_COUNTER(VCPU, instruction_iske),
	STATS_DESC_COUNTER(VCPU, instruction_ri),
	STATS_DESC_COUNTER(VCPU, instruction_rrbe),
	STATS_DESC_COUNTER(VCPU, instruction_sske),
	STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
	STATS_DESC_COUNTER(VCPU, instruction_stsi),
	STATS_DESC_COUNTER(VCPU, instruction_stfl),
	STATS_DESC_COUNTER(VCPU, instruction_tb),
	STATS_DESC_COUNTER(VCPU, instruction_tpi),
	STATS_DESC_COUNTER(VCPU, instruction_tprot),
	STATS_DESC_COUNTER(VCPU, instruction_tsch),
	STATS_DESC_COUNTER(VCPU, instruction_sie),
	STATS_DESC_COUNTER(VCPU, instruction_essa),
	STATS_DESC_COUNTER(VCPU, instruction_sthyi),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
	STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
	STATS_DESC_COUNTER(VCPU, diag_9c_forward),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
	STATS_DESC_COUNTER(VCPU, pfault_sync)
};
177ce55c049SJing Zhang 
/*
 * Header for the per-vcpu binary stats interface; same back-to-back
 * layout (id string, descriptors, data) as the VM stats header above.
 */
const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};
186ce55c049SJing Zhang 
/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);	/* read-only: cannot change after load */
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling.  >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa  = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");

/* maximum diag9c forwarding per second, 0 disables forwarding */
unsigned int diag9c_forwarding_hz;
module_param(diag9c_forwarding_hz, uint, 0644);
MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");

/*
 * allow asynchronous deinit for protected guests; enable by default since
 * the feature is opt-in anyway
 */
static int async_destroy = 1;
module_param(async_destroy, int, 0444);
MODULE_PARM_DESC(async_destroy, "Asynchronous destroy for protected guests");
219fb491d55SClaudio Imbrenda 
/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

/*
 * Number of unsigned longs in the facility masks above; the BUILD_BUG_ONs
 * ensure SIZE_INTERNAL never exceeds the architected mask/list sizes or
 * the size of the host's stfle facility list.
 */
static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		sizeof(stfle_fac_list));

	return SIZE_INTERNAL;
}
24778c4b59fSMichael Mueller 
/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

/* pte invalidation notifiers for regular guests and vSIE shadow gmaps */
static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
/* s390 debug feature areas: "kvm-trace" and "kvm-uv" (see kvm_arch_init) */
debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;
2579d8d5786SMichael Mueller 
/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}
264b0c632dbSHeiko Carstens 
/* nothing to check on s390 - always report compatible */
int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}
269f257d6dcSSean Christopherson 
/* forward declarations */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);
static int sca_switch_to_extended(struct kvm *kvm);
2742c70fe44SChristian Borntraeger 
2751575767eSDavid Hildenbrand static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
2761575767eSDavid Hildenbrand {
2771575767eSDavid Hildenbrand 	u8 delta_idx = 0;
2781575767eSDavid Hildenbrand 
2791575767eSDavid Hildenbrand 	/*
2801575767eSDavid Hildenbrand 	 * The TOD jumps by delta, we have to compensate this by adding
2811575767eSDavid Hildenbrand 	 * -delta to the epoch.
2821575767eSDavid Hildenbrand 	 */
2831575767eSDavid Hildenbrand 	delta = -delta;
2841575767eSDavid Hildenbrand 
2851575767eSDavid Hildenbrand 	/* sign-extension - we're adding to signed values below */
2861575767eSDavid Hildenbrand 	if ((s64)delta < 0)
2871575767eSDavid Hildenbrand 		delta_idx = -1;
2881575767eSDavid Hildenbrand 
2891575767eSDavid Hildenbrand 	scb->epoch += delta;
2901575767eSDavid Hildenbrand 	if (scb->ecd & ECD_MEF) {
2911575767eSDavid Hildenbrand 		scb->epdx += delta_idx;
2921575767eSDavid Hildenbrand 		if (scb->epoch < delta)
2931575767eSDavid Hildenbrand 			scb->epdx += 1;
2941575767eSDavid Hildenbrand 	}
2951575767eSDavid Hildenbrand }
2961575767eSDavid Hildenbrand 
/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	unsigned long long *delta = v;

	/* apply the TOD delta to every SCB (and vSIE SCB) of every VM */
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			/* the VM-wide epoch mirrors the one of vcpu 0 */
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			/* keep a running cpu timer consistent with the jump */
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}
327fdf03650SFan Zhang 
/* registered on the s390_epoch_delta notifier chain (TOD clock changes) */
static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};
331fdf03650SFan Zhang 
/*
 * Register the gmap pte invalidation notifiers (for regular guests and
 * for vSIE shadow gmaps) and the TOD epoch delta notifier; undone by
 * kvm_arch_hardware_unsetup().
 */
int kvm_arch_hardware_setup(void *opaque)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}
342b0c632dbSHeiko Carstens 
/* Undo kvm_arch_hardware_setup(): unregister all notifiers. */
void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}
350b0c632dbSHeiko Carstens 
/* mark cpu feature @nr as available to guests (inverted/MSB0 bit order) */
static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}
35522be5a13SDavid Hildenbrand 
/*
 * Test whether PERFORM LOCKED OPERATION function @nr is installed.
 * Or'ing in 0x100 selects the "test bit" form, which only sets the
 * condition code; cc == 0 means the function is available.
 */
static inline int plo_test_bit(unsigned char nr)
{
	unsigned long function = (unsigned long)nr | 0x100;
	int cc;

	asm volatile(
		"	lgr	0,%[function]\n"
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: [function] "d" (function)
		: "cc", "0");
	return cc == 0;
}
3720a763c78SDavid Hildenbrand 
/*
 * Execute the query function of a 32 bit (RRF format) instruction and
 * store the resulting query block at @query. GR0 = 0 selects the query
 * function, GR1 points to the destination buffer.
 */
static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
	asm volatile(
		"	lghi	0,0\n"
		"	lgr	1,%[query]\n"
		/* Parameter registers are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		:
		: [query] "d" ((unsigned long)query), [opc] "i" (opcode)
		: "cc", "memory", "0", "1");
}

/* opcodes of the instructions probed via __insn32_query() */
#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939
387173aec2dSChristian Borntraeger 
/*
 * Probe the optional CPU features and query-able subfunctions of the
 * host and record them in kvm_s390_available_subfunc /
 * kvm_s390_available_cpu_feat for later use by the cpu model code.
 */
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	/* probe all 256 PLO functions via the "test bit" interface */
	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	/* query the installed crypto subfunctions per MSA level */
	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 * Without them (or with nesting disabled) no vSIE features are
	 * advertised at all.
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}
49022be5a13SDavid Hildenbrand 
/*
 * Module/arch initialization: set up the debug areas, probe cpu
 * features, and register the FLIC device ops, zPCI interrupt
 * forwarding (AIFT) and the GIB. On failure everything acquired so
 * far is unwound in reverse order via the error labels.
 */
int kvm_arch_init(void *opaque)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf_uv)
		goto err_kvm_uv;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
		goto err_debug_view;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto err_flic;
	}

	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
		rc = kvm_s390_pci_init();
		if (rc) {
			pr_err("Unable to allocate AIFT for PCI\n");
			goto err_pci;
		}
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto err_gib;

	return 0;

err_gib:
	/* pci was initialized before the gib - tear it down again */
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_exit();
err_pci:
err_flic:
err_debug_view:
	debug_unregister(kvm_s390_dbf_uv);
err_kvm_uv:
	debug_unregister(kvm_s390_dbf);
	return rc;
}
541b0c632dbSHeiko Carstens 
/*
 * Module-unload counterpart to kvm_arch_init(): destroy the GIB,
 * tear down zPCI interpretation support (when compiled in), and
 * unregister both debug feature areas.  The order mirrors the
 * dependencies set up at init time.
 */
void kvm_arch_exit(void)
{
	kvm_s390_gib_destroy();
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_exit();
	debug_unregister(kvm_s390_dbf);
	debug_unregister(kvm_s390_dbf_uv);
}
55078f26131SChristian Borntraeger 
551b0c632dbSHeiko Carstens /* Section: device related */
552b0c632dbSHeiko Carstens long kvm_arch_dev_ioctl(struct file *filp,
553b0c632dbSHeiko Carstens 			unsigned int ioctl, unsigned long arg)
554b0c632dbSHeiko Carstens {
555b0c632dbSHeiko Carstens 	if (ioctl == KVM_S390_ENABLE_SIE)
556b0c632dbSHeiko Carstens 		return s390_enable_sie();
557b0c632dbSHeiko Carstens 	return -EINVAL;
558b0c632dbSHeiko Carstens }
559b0c632dbSHeiko Carstens 
/*
 * KVM_CHECK_EXTENSION handler: report which KVM capabilities this host
 * and VM support.
 *
 * For boolean capabilities the return value is 0 or 1; several
 * capabilities instead encode a limit or mask (e.g. maximum vcpu count,
 * maximum memop size, the valid guest-debug flag mask).  Unknown
 * extensions report 0 (unsupported).
 */
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	/* Capabilities that are unconditionally available: */
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_S390_DIAG318:
	case KVM_CAP_S390_MEM_OP_EXTENSION:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		/* Returns the mask of valid guest-debug flags, not 0/1. */
		r = KVM_GUESTDBG_VALID_MASK;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		/* 1M huge pages need the module parameter and no ucontrol. */
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		/* Maximum size of a single KVM_S390_MEM_OP transfer. */
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		/* The vcpu limit depends on the SCA format in use. */
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		/* NR_VCPUS is the "recommended" count: cap at online CPUs. */
		if (ext == KVM_CAP_NR_VCPUS)
			r = min_t(unsigned int, num_online_cpus(), r);
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	case KVM_CAP_S390_PROTECTED_ASYNC_DISABLE:
		r = async_destroy && is_prot_virt_host();
		break;
	case KVM_CAP_S390_PROTECTED:
		r = is_prot_virt_host();
		break;
	case KVM_CAP_S390_PROTECTED_DUMP: {
		/*
		 * Dumping a protected guest needs every one of these
		 * ultravisor calls; report support only if all are
		 * advertised in uv_info.inst_calls_list.
		 */
		u64 pv_cmds_dump[] = {
			BIT_UVC_CMD_DUMP_INIT,
			BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE,
			BIT_UVC_CMD_DUMP_CPU,
			BIT_UVC_CMD_DUMP_COMPLETE,
		};
		int i;

		r = is_prot_virt_host();

		for (i = 0; i < ARRAY_SIZE(pv_cmds_dump); i++) {
			if (!test_bit_inv(pv_cmds_dump[i],
					  (unsigned long *)&uv_info.inst_calls_list)) {
				r = 0;
				break;
			}
		}
		break;
	}
	case KVM_CAP_S390_ZPCI_OP:
		r = kvm_s390_pci_interp_allowed();
		break;
	case KVM_CAP_S390_CPU_TOPOLOGY:
		r = test_facility(11);
		break;
	default:
		r = 0;
	}
	return r;
}
671b0c632dbSHeiko Carstens 
/*
 * Pull dirty-page information from the gmap (guest address space) into
 * KVM's dirty bitmap for the given memslot.
 *
 * The slot is walked one guest segment (_PAGE_ENTRIES pages) at a time;
 * gmap_sync_dirty_log_pmd() fills a per-segment bitmap from the PGSTEs,
 * and every set bit is forwarded via mark_page_dirty().
 *
 * NOTE(review): the loop bound uses "<=" with last_gfn being one past
 * the slot's final gfn; the extra iteration appears to be harmless
 * because gfn_to_hva_memslot() yields an error hva outside the slot —
 * confirm before "fixing".
 */
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		/* Skip segments that are not mapped in userspace. */
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		/* Large slots can take a while: stay killable/preemptible. */
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}
70115f36ebdSJason J. Herne 
702b0c632dbSHeiko Carstens /* Section: vm related */
703a6e2f683SEugene (jno) Dvurechenski static void sca_del_vcpu(struct kvm_vcpu *vcpu);
704a6e2f683SEugene (jno) Dvurechenski 
705b0c632dbSHeiko Carstens /*
706b0c632dbSHeiko Carstens  * Get (and clear) the dirty memory log for a memory slot.
707b0c632dbSHeiko Carstens  */
708b0c632dbSHeiko Carstens int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
709b0c632dbSHeiko Carstens 			       struct kvm_dirty_log *log)
710b0c632dbSHeiko Carstens {
71115f36ebdSJason J. Herne 	int r;
71215f36ebdSJason J. Herne 	unsigned long n;
71315f36ebdSJason J. Herne 	struct kvm_memory_slot *memslot;
7142a49f61dSSean Christopherson 	int is_dirty;
71515f36ebdSJason J. Herne 
716e1e8a962SJanosch Frank 	if (kvm_is_ucontrol(kvm))
717e1e8a962SJanosch Frank 		return -EINVAL;
718e1e8a962SJanosch Frank 
71915f36ebdSJason J. Herne 	mutex_lock(&kvm->slots_lock);
72015f36ebdSJason J. Herne 
72115f36ebdSJason J. Herne 	r = -EINVAL;
72215f36ebdSJason J. Herne 	if (log->slot >= KVM_USER_MEM_SLOTS)
72315f36ebdSJason J. Herne 		goto out;
72415f36ebdSJason J. Herne 
7252a49f61dSSean Christopherson 	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
72615f36ebdSJason J. Herne 	if (r)
72715f36ebdSJason J. Herne 		goto out;
72815f36ebdSJason J. Herne 
72915f36ebdSJason J. Herne 	/* Clear the dirty log */
73015f36ebdSJason J. Herne 	if (is_dirty) {
73115f36ebdSJason J. Herne 		n = kvm_dirty_bitmap_bytes(memslot);
73215f36ebdSJason J. Herne 		memset(memslot->dirty_bitmap, 0, n);
73315f36ebdSJason J. Herne 	}
73415f36ebdSJason J. Herne 	r = 0;
73515f36ebdSJason J. Herne out:
73615f36ebdSJason J. Herne 	mutex_unlock(&kvm->slots_lock);
73715f36ebdSJason J. Herne 	return r;
738b0c632dbSHeiko Carstens }
739b0c632dbSHeiko Carstens 
7406502a34cSDavid Hildenbrand static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
7416502a34cSDavid Hildenbrand {
74246808a4cSMarc Zyngier 	unsigned long i;
7436502a34cSDavid Hildenbrand 	struct kvm_vcpu *vcpu;
7446502a34cSDavid Hildenbrand 
7456502a34cSDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
7466502a34cSDavid Hildenbrand 		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
7476502a34cSDavid Hildenbrand 	}
7486502a34cSDavid Hildenbrand }
7496502a34cSDavid Hildenbrand 
/*
 * KVM_ENABLE_CAP handler: opt a VM in to optional behavior.
 *
 * Simple flag capabilities (user_sigp, user_stsi, ...) just set a bit
 * in kvm->arch.  Capabilities that change the guest's CPU model
 * (vector registers, runtime instrumentation, AIS, guarded storage,
 * CPU topology) additionally require that no vcpu has been created
 * yet (-EBUSY otherwise) and flip facility bits under kvm->lock.
 *
 * cap->flags must be zero.  Returns 0 on success or a negative errno.
 */
int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			/*
			 * 129 is the base vector facility; 134/135/148/152/192
			 * are vector extensions that are only offered when the
			 * host provides them.
			 */
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			if (test_facility(192)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 192);
				set_kvm_facility(kvm->arch.model.fac_list, 192);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			/* AIS needs no host facility check, only no vcpus. */
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			mmap_write_lock(kvm->mm);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			mmap_write_unlock(kvm->mm);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid that the hardware works on
			 * stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		/* Existing vcpus must start intercepting instruction 0 too. */
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	case KVM_CAP_S390_CPU_TOPOLOGY:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(11)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 11);
			set_kvm_facility(kvm->arch.model.fac_list, 11);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_CPU_TOPOLOGY %s",
			 r ? "(not available)" : "(success)");
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
897d938dc55SCornelia Huck 
8988c0a7ce6SDominik Dingel static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
8998c0a7ce6SDominik Dingel {
9008c0a7ce6SDominik Dingel 	int ret;
9018c0a7ce6SDominik Dingel 
9028c0a7ce6SDominik Dingel 	switch (attr->attr) {
9038c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE:
9048c0a7ce6SDominik Dingel 		ret = 0;
905c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
906a3a92c31SDominik Dingel 			 kvm->arch.mem_limit);
907a3a92c31SDominik Dingel 		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
9088c0a7ce6SDominik Dingel 			ret = -EFAULT;
9098c0a7ce6SDominik Dingel 		break;
9108c0a7ce6SDominik Dingel 	default:
9118c0a7ce6SDominik Dingel 		ret = -ENXIO;
9128c0a7ce6SDominik Dingel 		break;
9138c0a7ce6SDominik Dingel 	}
9148c0a7ce6SDominik Dingel 	return ret;
9158c0a7ce6SDominik Dingel }
9168c0a7ce6SDominik Dingel 
/*
 * Write a KVM_S390_VM_MEM_CTRL attribute:
 *
 * - ENABLE_CMMA: turn on collaborative memory management; only before
 *   the first vcpu exists and never together with 1M huge pages.
 * - CLR_CMMA:    reset all CMMA states of the guest address space.
 * - LIMIT_SIZE:  replace the gmap with a new one bounded by the
 *   user-supplied limit; only before the first vcpu exists.
 *
 * Returns 0 on success or a negative errno.
 */
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		/* srcu protects against concurrent memslot changes. */
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		/* The limit can only be tightened, never raised. */
		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				/* Swap in the new gmap for the old one. */
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
10044f718eabSDominik Dingel 
1005a374e892STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
1006a374e892STony Krowiak 
100720c922f0STony Krowiak void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
1008a374e892STony Krowiak {
1009a374e892STony Krowiak 	struct kvm_vcpu *vcpu;
101046808a4cSMarc Zyngier 	unsigned long i;
1011a374e892STony Krowiak 
101220c922f0STony Krowiak 	kvm_s390_vcpu_block_all(kvm);
101320c922f0STony Krowiak 
10143194cdb7SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
101520c922f0STony Krowiak 		kvm_s390_vcpu_crypto_setup(vcpu);
10163194cdb7SDavid Hildenbrand 		/* recreate the shadow crycb by leaving the VSIE handler */
10173194cdb7SDavid Hildenbrand 		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
10183194cdb7SDavid Hildenbrand 	}
101920c922f0STony Krowiak 
102020c922f0STony Krowiak 	kvm_s390_vcpu_unblock_all(kvm);
102120c922f0STony Krowiak }
102220c922f0STony Krowiak 
102320c922f0STony Krowiak static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
102420c922f0STony Krowiak {
1025a374e892STony Krowiak 	mutex_lock(&kvm->lock);
1026a374e892STony Krowiak 	switch (attr->attr) {
1027a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
10288e41bd54SChristian Borntraeger 		if (!test_kvm_facility(kvm, 76)) {
10298e41bd54SChristian Borntraeger 			mutex_unlock(&kvm->lock);
103037940fb0STony Krowiak 			return -EINVAL;
10318e41bd54SChristian Borntraeger 		}
1032a374e892STony Krowiak 		get_random_bytes(
1033a374e892STony Krowiak 			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1034a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1035a374e892STony Krowiak 		kvm->arch.crypto.aes_kw = 1;
1036c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
1037a374e892STony Krowiak 		break;
1038a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
10398e41bd54SChristian Borntraeger 		if (!test_kvm_facility(kvm, 76)) {
10408e41bd54SChristian Borntraeger 			mutex_unlock(&kvm->lock);
104137940fb0STony Krowiak 			return -EINVAL;
10428e41bd54SChristian Borntraeger 		}
1043a374e892STony Krowiak 		get_random_bytes(
1044a374e892STony Krowiak 			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1045a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
1046a374e892STony Krowiak 		kvm->arch.crypto.dea_kw = 1;
1047c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
1048a374e892STony Krowiak 		break;
1049a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
10508e41bd54SChristian Borntraeger 		if (!test_kvm_facility(kvm, 76)) {
10518e41bd54SChristian Borntraeger 			mutex_unlock(&kvm->lock);
105237940fb0STony Krowiak 			return -EINVAL;
10538e41bd54SChristian Borntraeger 		}
1054a374e892STony Krowiak 		kvm->arch.crypto.aes_kw = 0;
1055a374e892STony Krowiak 		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
1056a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1057c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
1058a374e892STony Krowiak 		break;
1059a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
10608e41bd54SChristian Borntraeger 		if (!test_kvm_facility(kvm, 76)) {
10618e41bd54SChristian Borntraeger 			mutex_unlock(&kvm->lock);
106237940fb0STony Krowiak 			return -EINVAL;
10638e41bd54SChristian Borntraeger 		}
1064a374e892STony Krowiak 		kvm->arch.crypto.dea_kw = 0;
1065a374e892STony Krowiak 		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
1066a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
1067c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
1068a374e892STony Krowiak 		break;
106937940fb0STony Krowiak 	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
107037940fb0STony Krowiak 		if (!ap_instructions_available()) {
107137940fb0STony Krowiak 			mutex_unlock(&kvm->lock);
107237940fb0STony Krowiak 			return -EOPNOTSUPP;
107337940fb0STony Krowiak 		}
107437940fb0STony Krowiak 		kvm->arch.crypto.apie = 1;
107537940fb0STony Krowiak 		break;
107637940fb0STony Krowiak 	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
107737940fb0STony Krowiak 		if (!ap_instructions_available()) {
107837940fb0STony Krowiak 			mutex_unlock(&kvm->lock);
107937940fb0STony Krowiak 			return -EOPNOTSUPP;
108037940fb0STony Krowiak 		}
108137940fb0STony Krowiak 		kvm->arch.crypto.apie = 0;
108237940fb0STony Krowiak 		break;
1083a374e892STony Krowiak 	default:
1084a374e892STony Krowiak 		mutex_unlock(&kvm->lock);
1085a374e892STony Krowiak 		return -ENXIO;
1086a374e892STony Krowiak 	}
1087a374e892STony Krowiak 
108820c922f0STony Krowiak 	kvm_s390_vcpu_crypto_reset_all(kvm);
1089a374e892STony Krowiak 	mutex_unlock(&kvm->lock);
1090a374e892STony Krowiak 	return 0;
1091a374e892STony Krowiak }
1092a374e892STony Krowiak 
10933f4bbb43SMatthew Rosato static void kvm_s390_vcpu_pci_setup(struct kvm_vcpu *vcpu)
10943f4bbb43SMatthew Rosato {
10953f4bbb43SMatthew Rosato 	/* Only set the ECB bits after guest requests zPCI interpretation */
10963f4bbb43SMatthew Rosato 	if (!vcpu->kvm->arch.use_zpci_interp)
10973f4bbb43SMatthew Rosato 		return;
10983f4bbb43SMatthew Rosato 
10993f4bbb43SMatthew Rosato 	vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI;
11003f4bbb43SMatthew Rosato 	vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI;
11013f4bbb43SMatthew Rosato }
11023f4bbb43SMatthew Rosato 
11033f4bbb43SMatthew Rosato void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm)
11043f4bbb43SMatthew Rosato {
11053f4bbb43SMatthew Rosato 	struct kvm_vcpu *vcpu;
11063f4bbb43SMatthew Rosato 	unsigned long i;
11073f4bbb43SMatthew Rosato 
11083f4bbb43SMatthew Rosato 	lockdep_assert_held(&kvm->lock);
11093f4bbb43SMatthew Rosato 
11103f4bbb43SMatthew Rosato 	if (!kvm_s390_pci_interp_allowed())
11113f4bbb43SMatthew Rosato 		return;
11123f4bbb43SMatthew Rosato 
11133f4bbb43SMatthew Rosato 	/*
11143f4bbb43SMatthew Rosato 	 * If host is configured for PCI and the necessary facilities are
11153f4bbb43SMatthew Rosato 	 * available, turn on interpretation for the life of this guest
11163f4bbb43SMatthew Rosato 	 */
11173f4bbb43SMatthew Rosato 	kvm->arch.use_zpci_interp = 1;
11183f4bbb43SMatthew Rosato 
11193f4bbb43SMatthew Rosato 	kvm_s390_vcpu_block_all(kvm);
11203f4bbb43SMatthew Rosato 
11213f4bbb43SMatthew Rosato 	kvm_for_each_vcpu(i, vcpu, kvm) {
11223f4bbb43SMatthew Rosato 		kvm_s390_vcpu_pci_setup(vcpu);
11233f4bbb43SMatthew Rosato 		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
11243f4bbb43SMatthew Rosato 	}
11253f4bbb43SMatthew Rosato 
11263f4bbb43SMatthew Rosato 	kvm_s390_vcpu_unblock_all(kvm);
11273f4bbb43SMatthew Rosato }
11283f4bbb43SMatthew Rosato 
1129190df4a2SClaudio Imbrenda static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
1130190df4a2SClaudio Imbrenda {
113146808a4cSMarc Zyngier 	unsigned long cx;
1132190df4a2SClaudio Imbrenda 	struct kvm_vcpu *vcpu;
1133190df4a2SClaudio Imbrenda 
1134190df4a2SClaudio Imbrenda 	kvm_for_each_vcpu(cx, vcpu, kvm)
1135190df4a2SClaudio Imbrenda 		kvm_s390_sync_request(req, vcpu);
1136190df4a2SClaudio Imbrenda }
1137190df4a2SClaudio Imbrenda 
/*
 * Put the VM into migration mode.
 *
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 *
 * Without CMMA only the mode flag is set.  With CMMA, every page of
 * every slot is marked as having dirty storage attributes, and the
 * total page count is stored so the migration code can track progress.
 *
 * Returns 0 on success, -EINVAL if there are no memslots or a slot
 * lacks a dirty bitmap.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int bkt;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || kvm_memslots_empty(slots))
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	kvm_for_each_memslot(ms, bkt, slots) {
		if (!ms->dirty_bitmap)
			return -EINVAL;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	/* tell all vcpus to start reporting CMMA changes */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}
1178190df4a2SClaudio Imbrenda 
1179190df4a2SClaudio Imbrenda /*
11801de1ea7eSChristian Borntraeger  * Must be called with kvm->slots_lock to avoid races with ourselves and
1181190df4a2SClaudio Imbrenda  * kvm_s390_vm_start_migration.
1182190df4a2SClaudio Imbrenda  */
1183190df4a2SClaudio Imbrenda static int kvm_s390_vm_stop_migration(struct kvm *kvm)
1184190df4a2SClaudio Imbrenda {
1185190df4a2SClaudio Imbrenda 	/* migration mode already disabled */
1186afdad616SClaudio Imbrenda 	if (!kvm->arch.migration_mode)
1187190df4a2SClaudio Imbrenda 		return 0;
1188afdad616SClaudio Imbrenda 	kvm->arch.migration_mode = 0;
	/* Only CMMA users have per-vcpu migration state to tear down. */
1189afdad616SClaudio Imbrenda 	if (kvm->arch.use_cmma)
1190190df4a2SClaudio Imbrenda 		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
1191190df4a2SClaudio Imbrenda 	return 0;
1192190df4a2SClaudio Imbrenda }
1193190df4a2SClaudio Imbrenda 
/*
 * Dispatcher for the KVM_S390_VM_MIGRATION attribute group (set side):
 * start or stop migration mode. kvm->slots_lock is taken to serialize
 * against concurrent start/stop (see the comments on the helpers above).
 * Returns -ENXIO for unknown sub-attributes.
 */
1194190df4a2SClaudio Imbrenda static int kvm_s390_vm_set_migration(struct kvm *kvm,
1195190df4a2SClaudio Imbrenda 				     struct kvm_device_attr *attr)
1196190df4a2SClaudio Imbrenda {
11971de1ea7eSChristian Borntraeger 	int res = -ENXIO;
1198190df4a2SClaudio Imbrenda 
11991de1ea7eSChristian Borntraeger 	mutex_lock(&kvm->slots_lock);
1200190df4a2SClaudio Imbrenda 	switch (attr->attr) {
1201190df4a2SClaudio Imbrenda 	case KVM_S390_VM_MIGRATION_START:
1202190df4a2SClaudio Imbrenda 		res = kvm_s390_vm_start_migration(kvm);
1203190df4a2SClaudio Imbrenda 		break;
1204190df4a2SClaudio Imbrenda 	case KVM_S390_VM_MIGRATION_STOP:
1205190df4a2SClaudio Imbrenda 		res = kvm_s390_vm_stop_migration(kvm);
1206190df4a2SClaudio Imbrenda 		break;
1207190df4a2SClaudio Imbrenda 	default:
1208190df4a2SClaudio Imbrenda 		break;
1209190df4a2SClaudio Imbrenda 	}
12101de1ea7eSChristian Borntraeger 	mutex_unlock(&kvm->slots_lock);
1211190df4a2SClaudio Imbrenda 
1212190df4a2SClaudio Imbrenda 	return res;
1213190df4a2SClaudio Imbrenda }
1214190df4a2SClaudio Imbrenda 
/*
 * Report the current migration mode (0 or 1) to user space as a u64.
 * Only the KVM_S390_VM_MIGRATION_STATUS sub-attribute is supported.
 */
1215190df4a2SClaudio Imbrenda static int kvm_s390_vm_get_migration(struct kvm *kvm,
1216190df4a2SClaudio Imbrenda 				     struct kvm_device_attr *attr)
1217190df4a2SClaudio Imbrenda {
1218afdad616SClaudio Imbrenda 	u64 mig = kvm->arch.migration_mode;
1219190df4a2SClaudio Imbrenda 
1220190df4a2SClaudio Imbrenda 	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
1221190df4a2SClaudio Imbrenda 		return -ENXIO;
1222190df4a2SClaudio Imbrenda 
1223190df4a2SClaudio Imbrenda 	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
1224190df4a2SClaudio Imbrenda 		return -EFAULT;
1225190df4a2SClaudio Imbrenda 	return 0;
1226190df4a2SClaudio Imbrenda }
1227190df4a2SClaudio Imbrenda 
12286973091dSNico Boehr static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
12296973091dSNico Boehr 
/*
 * Set the guest TOD clock from an extended (epoch index + base) value.
 * A non-zero epoch index is only accepted if the guest has the
 * multiple-epoch facility (stfle 139). Called with kvm->lock held by
 * kvm_s390_set_tod().
 */
12308fa1696eSCollin L. Walling static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
12318fa1696eSCollin L. Walling {
12328fa1696eSCollin L. Walling 	struct kvm_s390_vm_tod_clock gtod;
12338fa1696eSCollin L. Walling 
12348fa1696eSCollin L. Walling 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
12358fa1696eSCollin L. Walling 		return -EFAULT;
12368fa1696eSCollin L. Walling 
12370e7def5fSDavid Hildenbrand 	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
12388fa1696eSCollin L. Walling 		return -EINVAL;
12396973091dSNico Boehr 	__kvm_s390_set_tod_clock(kvm, &gtod);
12408fa1696eSCollin L. Walling 
12418fa1696eSCollin L. Walling 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
12428fa1696eSCollin L. Walling 		gtod.epoch_idx, gtod.tod);
12438fa1696eSCollin L. Walling 
12448fa1696eSCollin L. Walling 	return 0;
12458fa1696eSCollin L. Walling }
12468fa1696eSCollin L. Walling 
/*
 * "Set" the high byte of the guest TOD clock. Only the value 0 is
 * accepted; anything else returns -EINVAL and nothing is changed.
 */
124772f25020SJason J. Herne static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
124872f25020SJason J. Herne {
124972f25020SJason J. Herne 	u8 gtod_high;
125072f25020SJason J. Herne 
125172f25020SJason J. Herne 	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
125272f25020SJason J. Herne 					   sizeof(gtod_high)))
125372f25020SJason J. Herne 		return -EFAULT;
125472f25020SJason J. Herne 
125572f25020SJason J. Herne 	if (gtod_high != 0)
125672f25020SJason J. Herne 		return -EINVAL;
125758c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
125872f25020SJason J. Herne 
125972f25020SJason J. Herne 	return 0;
126072f25020SJason J. Herne }
126172f25020SJason J. Herne 
/*
 * Set the 64-bit base of the guest TOD clock; the epoch index stays 0
 * (gtod is zero-initialized and only .tod is filled from user space).
 * Called with kvm->lock held by kvm_s390_set_tod().
 */
126272f25020SJason J. Herne static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
126372f25020SJason J. Herne {
12640e7def5fSDavid Hildenbrand 	struct kvm_s390_vm_tod_clock gtod = { 0 };
126572f25020SJason J. Herne 
12660e7def5fSDavid Hildenbrand 	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
12670e7def5fSDavid Hildenbrand 			   sizeof(gtod.tod)))
126872f25020SJason J. Herne 		return -EFAULT;
126972f25020SJason J. Herne 
12706973091dSNico Boehr 	__kvm_s390_set_tod_clock(kvm, &gtod);
12710e7def5fSDavid Hildenbrand 	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
127272f25020SJason J. Herne 	return 0;
127372f25020SJason J. Herne }
127472f25020SJason J. Herne 
/*
 * Dispatcher for the KVM_S390_VM_TOD attribute group (set side). Takes
 * kvm->lock across the actual clock update and rejects the operation for
 * protected (PV) guests, whose TOD is owned by the ultravisor. Returns
 * -ENXIO for unknown sub-attributes and -EINVAL for unexpected flags.
 */
127572f25020SJason J. Herne static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
127672f25020SJason J. Herne {
127772f25020SJason J. Herne 	int ret;
127872f25020SJason J. Herne 
127972f25020SJason J. Herne 	if (attr->flags)
128072f25020SJason J. Herne 		return -EINVAL;
128172f25020SJason J. Herne 
12826973091dSNico Boehr 	mutex_lock(&kvm->lock);
12836973091dSNico Boehr 	/*
12846973091dSNico Boehr 	 * For protected guests, the TOD is managed by the ultravisor, so trying
12856973091dSNico Boehr 	 * to change it will never bring the expected results.
12866973091dSNico Boehr 	 */
12876973091dSNico Boehr 	if (kvm_s390_pv_is_protected(kvm)) {
12886973091dSNico Boehr 		ret = -EOPNOTSUPP;
12896973091dSNico Boehr 		goto out_unlock;
12906973091dSNico Boehr 	}
12916973091dSNico Boehr 
129272f25020SJason J. Herne 	switch (attr->attr) {
12938fa1696eSCollin L. Walling 	case KVM_S390_VM_TOD_EXT:
12948fa1696eSCollin L. Walling 		ret = kvm_s390_set_tod_ext(kvm, attr);
12958fa1696eSCollin L. Walling 		break;
129672f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
129772f25020SJason J. Herne 		ret = kvm_s390_set_tod_high(kvm, attr);
129872f25020SJason J. Herne 		break;
129972f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
130072f25020SJason J. Herne 		ret = kvm_s390_set_tod_low(kvm, attr);
130172f25020SJason J. Herne 		break;
130272f25020SJason J. Herne 	default:
130372f25020SJason J. Herne 		ret = -ENXIO;
130472f25020SJason J. Herne 		break;
130572f25020SJason J. Herne 	}
13066973091dSNico Boehr 
13076973091dSNico Boehr out_unlock:
13086973091dSNico Boehr 	mutex_unlock(&kvm->lock);
130972f25020SJason J. Herne 	return ret;
131072f25020SJason J. Herne }
131172f25020SJason J. Herne 
/*
 * Read the current guest TOD: the host TOD plus the guest's epoch, and,
 * if the multiple-epoch facility (stfle 139) is available, the epoch
 * index plus the guest's epdx. Preemption is disabled so the clock and
 * the epoch values are read consistently.
 */
131233d1b272SDavid Hildenbrand static void kvm_s390_get_tod_clock(struct kvm *kvm,
13138fa1696eSCollin L. Walling 				   struct kvm_s390_vm_tod_clock *gtod)
13148fa1696eSCollin L. Walling {
13152cfd7b73SHeiko Carstens 	union tod_clock clk;
13168fa1696eSCollin L. Walling 
13178fa1696eSCollin L. Walling 	preempt_disable();
13188fa1696eSCollin L. Walling 
13192cfd7b73SHeiko Carstens 	store_tod_clock_ext(&clk);
13208fa1696eSCollin L. Walling 
13212cfd7b73SHeiko Carstens 	gtod->tod = clk.tod + kvm->arch.epoch;
132233d1b272SDavid Hildenbrand 	gtod->epoch_idx = 0;
132333d1b272SDavid Hildenbrand 	if (test_kvm_facility(kvm, 139)) {
13242cfd7b73SHeiko Carstens 		gtod->epoch_idx = clk.ei + kvm->arch.epdx;
		/* Propagate the carry into the epoch index if the sum wrapped. */
13252cfd7b73SHeiko Carstens 		if (gtod->tod < clk.tod)
13268fa1696eSCollin L. Walling 			gtod->epoch_idx += 1;
132733d1b272SDavid Hildenbrand 	}
13288fa1696eSCollin L. Walling 
13298fa1696eSCollin L. Walling 	preempt_enable();
13308fa1696eSCollin L. Walling }
13318fa1696eSCollin L. Walling 
/*
 * Return the guest's extended TOD (epoch index + 64-bit base) to user
 * space.
 */
13328fa1696eSCollin L. Walling static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
13338fa1696eSCollin L. Walling {
13348fa1696eSCollin L. Walling 	struct kvm_s390_vm_tod_clock gtod;
13358fa1696eSCollin L. Walling 
13368fa1696eSCollin L. Walling 	memset(&gtod, 0, sizeof(gtod));
133733d1b272SDavid Hildenbrand 	kvm_s390_get_tod_clock(kvm, &gtod);
13388fa1696eSCollin L. Walling 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
13398fa1696eSCollin L. Walling 		return -EFAULT;
13408fa1696eSCollin L. Walling 
13418fa1696eSCollin L. Walling 	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
13428fa1696eSCollin L. Walling 		gtod.epoch_idx, gtod.tod);
13438fa1696eSCollin L. Walling 	return 0;
13448fa1696eSCollin L. Walling }
13458fa1696eSCollin L. Walling 
/*
 * The high byte of the guest TOD clock is always reported as 0 (the
 * matching setter also only accepts 0).
 */
134672f25020SJason J. Herne static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
134772f25020SJason J. Herne {
134872f25020SJason J. Herne 	u8 gtod_high = 0;
134972f25020SJason J. Herne 
135072f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod_high,
135172f25020SJason J. Herne 					 sizeof(gtod_high)))
135272f25020SJason J. Herne 		return -EFAULT;
135358c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
135472f25020SJason J. Herne 
135572f25020SJason J. Herne 	return 0;
135672f25020SJason J. Herne }
135772f25020SJason J. Herne 
/*
 * Return the 64-bit base of the guest TOD clock (fast path, no epoch
 * index) to user space.
 */
135872f25020SJason J. Herne static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
135972f25020SJason J. Herne {
13605a3d883aSDavid Hildenbrand 	u64 gtod;
136172f25020SJason J. Herne 
136260417fccSDavid Hildenbrand 	gtod = kvm_s390_get_tod_clock_fast(kvm);
136372f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
136472f25020SJason J. Herne 		return -EFAULT;
136558c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
136672f25020SJason J. Herne 
136772f25020SJason J. Herne 	return 0;
136872f25020SJason J. Herne }
136972f25020SJason J. Herne 
/*
 * Dispatcher for the KVM_S390_VM_TOD attribute group (get side).
 * Returns -ENXIO for unknown sub-attributes and -EINVAL for unexpected
 * flags.
 */
137072f25020SJason J. Herne static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
137172f25020SJason J. Herne {
137272f25020SJason J. Herne 	int ret;
137372f25020SJason J. Herne 
137472f25020SJason J. Herne 	if (attr->flags)
137572f25020SJason J. Herne 		return -EINVAL;
137672f25020SJason J. Herne 
137772f25020SJason J. Herne 	switch (attr->attr) {
13788fa1696eSCollin L. Walling 	case KVM_S390_VM_TOD_EXT:
13798fa1696eSCollin L. Walling 		ret = kvm_s390_get_tod_ext(kvm, attr);
13808fa1696eSCollin L. Walling 		break;
138172f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
138272f25020SJason J. Herne 		ret = kvm_s390_get_tod_high(kvm, attr);
138372f25020SJason J. Herne 		break;
138472f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
138572f25020SJason J. Herne 		ret = kvm_s390_get_tod_low(kvm, attr);
138672f25020SJason J. Herne 		break;
138772f25020SJason J. Herne 	default:
138872f25020SJason J. Herne 		ret = -ENXIO;
138972f25020SJason J. Herne 		break;
139072f25020SJason J. Herne 	}
139172f25020SJason J. Herne 	return ret;
139272f25020SJason J. Herne }
139372f25020SJason J. Herne 
/*
 * Set the guest CPU model (cpuid, ibc, facility list) from user space.
 * Only allowed while no vcpus have been created yet (-EBUSY otherwise).
 * A requested IBC is clamped into the machine-supported range.
 */
1394658b6edaSMichael Mueller static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1395658b6edaSMichael Mueller {
1396658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_processor *proc;
1397053dd230SDavid Hildenbrand 	u16 lowest_ibc, unblocked_ibc;
1398658b6edaSMichael Mueller 	int ret = 0;
1399658b6edaSMichael Mueller 
1400658b6edaSMichael Mueller 	mutex_lock(&kvm->lock);
1401a03825bbSPaolo Bonzini 	if (kvm->created_vcpus) {
1402658b6edaSMichael Mueller 		ret = -EBUSY;
1403658b6edaSMichael Mueller 		goto out;
1404658b6edaSMichael Mueller 	}
1405c4196218SChristian Borntraeger 	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
1406658b6edaSMichael Mueller 	if (!proc) {
1407658b6edaSMichael Mueller 		ret = -ENOMEM;
1408658b6edaSMichael Mueller 		goto out;
1409658b6edaSMichael Mueller 	}
1410658b6edaSMichael Mueller 	if (!copy_from_user(proc, (void __user *)attr->addr,
1411658b6edaSMichael Mueller 			    sizeof(*proc))) {
14129bb0ec09SDavid Hildenbrand 		kvm->arch.model.cpuid = proc->cpuid;
1413053dd230SDavid Hildenbrand 		lowest_ibc = sclp.ibc >> 16 & 0xfff;
1414053dd230SDavid Hildenbrand 		unblocked_ibc = sclp.ibc & 0xfff;
		/*
		 * Only touch the ibc if both the machine reports one and the
		 * user requested one; clamp it into [lowest_ibc, unblocked_ibc].
		 */
14150487c44dSDavid Hildenbrand 		if (lowest_ibc && proc->ibc) {
1416053dd230SDavid Hildenbrand 			if (proc->ibc > unblocked_ibc)
1417053dd230SDavid Hildenbrand 				kvm->arch.model.ibc = unblocked_ibc;
1418053dd230SDavid Hildenbrand 			else if (proc->ibc < lowest_ibc)
1419053dd230SDavid Hildenbrand 				kvm->arch.model.ibc = lowest_ibc;
1420053dd230SDavid Hildenbrand 			else
1421658b6edaSMichael Mueller 				kvm->arch.model.ibc = proc->ibc;
1422053dd230SDavid Hildenbrand 		}
1423c54f0d6aSDavid Hildenbrand 		memcpy(kvm->arch.model.fac_list, proc->fac_list,
1424658b6edaSMichael Mueller 		       S390_ARCH_FAC_LIST_SIZE_BYTE);
1425a8c39dd7SChristian Borntraeger 		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1426a8c39dd7SChristian Borntraeger 			 kvm->arch.model.ibc,
1427a8c39dd7SChristian Borntraeger 			 kvm->arch.model.cpuid);
1428a8c39dd7SChristian Borntraeger 		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1429a8c39dd7SChristian Borntraeger 			 kvm->arch.model.fac_list[0],
1430a8c39dd7SChristian Borntraeger 			 kvm->arch.model.fac_list[1],
1431a8c39dd7SChristian Borntraeger 			 kvm->arch.model.fac_list[2]);
1432658b6edaSMichael Mueller 	} else
1433658b6edaSMichael Mueller 		ret = -EFAULT;
1434658b6edaSMichael Mueller 	kfree(proc);
1435658b6edaSMichael Mueller out:
1436658b6edaSMichael Mueller 	mutex_unlock(&kvm->lock);
1437658b6edaSMichael Mueller 	return ret;
1438658b6edaSMichael Mueller }
1439658b6edaSMichael Mueller 
/*
 * Set the guest CPU feature bitmap. The requested features must be a
 * subset of the features available on the host, and no vcpus may have
 * been created yet (-EBUSY otherwise).
 */
144015c9705fSDavid Hildenbrand static int kvm_s390_set_processor_feat(struct kvm *kvm,
144115c9705fSDavid Hildenbrand 				       struct kvm_device_attr *attr)
144215c9705fSDavid Hildenbrand {
144315c9705fSDavid Hildenbrand 	struct kvm_s390_vm_cpu_feat data;
144415c9705fSDavid Hildenbrand 
144515c9705fSDavid Hildenbrand 	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
144615c9705fSDavid Hildenbrand 		return -EFAULT;
144715c9705fSDavid Hildenbrand 	if (!bitmap_subset((unsigned long *) data.feat,
144815c9705fSDavid Hildenbrand 			   kvm_s390_available_cpu_feat,
144915c9705fSDavid Hildenbrand 			   KVM_S390_VM_CPU_FEAT_NR_BITS))
145015c9705fSDavid Hildenbrand 		return -EINVAL;
145115c9705fSDavid Hildenbrand 
145215c9705fSDavid Hildenbrand 	mutex_lock(&kvm->lock);
14532f8311c9SChristian Borntraeger 	if (kvm->created_vcpus) {
14542f8311c9SChristian Borntraeger 		mutex_unlock(&kvm->lock);
14552f8311c9SChristian Borntraeger 		return -EBUSY;
14562f8311c9SChristian Borntraeger 	}
1457da0f8e95SYury Norov 	bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
145815c9705fSDavid Hildenbrand 	mutex_unlock(&kvm->lock);
14592f8311c9SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
14602f8311c9SChristian Borntraeger 			 data.feat[0],
14612f8311c9SChristian Borntraeger 			 data.feat[1],
14622f8311c9SChristian Borntraeger 			 data.feat[2]);
14632f8311c9SChristian Borntraeger 	return 0;
146415c9705fSDavid Hildenbrand }
146515c9705fSDavid Hildenbrand 
/*
 * Copy the user-supplied subfunction query blocks (crypto, PLO, sort,
 * compression, ...) into the guest CPU model. Only possible before the
 * first vcpu is created (-EBUSY otherwise). Everything after the copy
 * merely traces the newly set values.
 */
14660a763c78SDavid Hildenbrand static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
14670a763c78SDavid Hildenbrand 					  struct kvm_device_attr *attr)
14680a763c78SDavid Hildenbrand {
1469346fa2f8SChristian Borntraeger 	mutex_lock(&kvm->lock);
1470346fa2f8SChristian Borntraeger 	if (kvm->created_vcpus) {
1471346fa2f8SChristian Borntraeger 		mutex_unlock(&kvm->lock);
1472346fa2f8SChristian Borntraeger 		return -EBUSY;
1473346fa2f8SChristian Borntraeger 	}
1474346fa2f8SChristian Borntraeger 
1475346fa2f8SChristian Borntraeger 	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
1476346fa2f8SChristian Borntraeger 			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
1477346fa2f8SChristian Borntraeger 		mutex_unlock(&kvm->lock);
1478346fa2f8SChristian Borntraeger 		return -EFAULT;
1479346fa2f8SChristian Borntraeger 	}
1480346fa2f8SChristian Borntraeger 	mutex_unlock(&kvm->lock);
1481346fa2f8SChristian Borntraeger 
	/* Trace all subfunction blocks that were just set. */
148211ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
148311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
148411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
148511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
148611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
148711ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
148811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
148911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
149011ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
149111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
149211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
149311ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
149411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
149511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
149611ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KM     subfunc 0x%16.16lx.%16.16lx",
149711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
149811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
149911ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
150011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
150111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
150211ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
150311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
150411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
150511ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
150611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
150711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
150811ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
150911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
151011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
151111ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
151211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
151311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
151411ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
151511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
151611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
151711ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
151811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
151911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
152011ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
152111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
152211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
152311ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
152411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
152511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
152613209ad0SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
152713209ad0SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
152813209ad0SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1529173aec2dSChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1530173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1531173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1532173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1533173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
15344f45b90eSChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
15354f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
15364f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
15374f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
15384f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
153911ba5961SChristian Borntraeger 
1540346fa2f8SChristian Borntraeger 	return 0;
15410a763c78SDavid Hildenbrand }
15420a763c78SDavid Hildenbrand 
/*
 * Dispatcher for the KVM_S390_VM_CPU_MODEL attribute group (set side):
 * processor model, CPU features or subfunctions. Returns -ENXIO for
 * unknown sub-attributes.
 */
1543658b6edaSMichael Mueller static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1544658b6edaSMichael Mueller {
1545658b6edaSMichael Mueller 	int ret = -ENXIO;
1546658b6edaSMichael Mueller 
1547658b6edaSMichael Mueller 	switch (attr->attr) {
1548658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
1549658b6edaSMichael Mueller 		ret = kvm_s390_set_processor(kvm, attr);
1550658b6edaSMichael Mueller 		break;
155115c9705fSDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
155215c9705fSDavid Hildenbrand 		ret = kvm_s390_set_processor_feat(kvm, attr);
155315c9705fSDavid Hildenbrand 		break;
15540a763c78SDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
15550a763c78SDavid Hildenbrand 		ret = kvm_s390_set_processor_subfunc(kvm, attr);
15560a763c78SDavid Hildenbrand 		break;
1557658b6edaSMichael Mueller 	}
1558658b6edaSMichael Mueller 	return ret;
1559658b6edaSMichael Mueller }
1560658b6edaSMichael Mueller 
/*
 * Report the current guest CPU model (cpuid, ibc, facility list) to
 * user space. Uses a kernel bounce buffer for the copy_to_user().
 */
1561658b6edaSMichael Mueller static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1562658b6edaSMichael Mueller {
1563658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_processor *proc;
1564658b6edaSMichael Mueller 	int ret = 0;
1565658b6edaSMichael Mueller 
1566c4196218SChristian Borntraeger 	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
1567658b6edaSMichael Mueller 	if (!proc) {
1568658b6edaSMichael Mueller 		ret = -ENOMEM;
1569658b6edaSMichael Mueller 		goto out;
1570658b6edaSMichael Mueller 	}
15719bb0ec09SDavid Hildenbrand 	proc->cpuid = kvm->arch.model.cpuid;
1572658b6edaSMichael Mueller 	proc->ibc = kvm->arch.model.ibc;
1573c54f0d6aSDavid Hildenbrand 	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1574c54f0d6aSDavid Hildenbrand 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
1575a8c39dd7SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1576a8c39dd7SChristian Borntraeger 		 kvm->arch.model.ibc,
1577a8c39dd7SChristian Borntraeger 		 kvm->arch.model.cpuid);
1578a8c39dd7SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1579a8c39dd7SChristian Borntraeger 		 kvm->arch.model.fac_list[0],
1580a8c39dd7SChristian Borntraeger 		 kvm->arch.model.fac_list[1],
1581a8c39dd7SChristian Borntraeger 		 kvm->arch.model.fac_list[2]);
1582658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1583658b6edaSMichael Mueller 		ret = -EFAULT;
1584658b6edaSMichael Mueller 	kfree(proc);
1585658b6edaSMichael Mueller out:
1586658b6edaSMichael Mueller 	return ret;
1587658b6edaSMichael Mueller }
1588658b6edaSMichael Mueller 
/*
 * Report the host machine's CPU model to user space: host cpuid, the
 * SCLP-reported ibc, the KVM facility mask and the host facility list
 * (stfle). Uses a kernel bounce buffer for the copy_to_user().
 */
1589658b6edaSMichael Mueller static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1590658b6edaSMichael Mueller {
1591658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_machine *mach;
1592658b6edaSMichael Mueller 	int ret = 0;
1593658b6edaSMichael Mueller 
1594c4196218SChristian Borntraeger 	mach = kzalloc(sizeof(*mach), GFP_KERNEL_ACCOUNT);
1595658b6edaSMichael Mueller 	if (!mach) {
1596658b6edaSMichael Mueller 		ret = -ENOMEM;
1597658b6edaSMichael Mueller 		goto out;
1598658b6edaSMichael Mueller 	}
1599658b6edaSMichael Mueller 	get_cpu_id((struct cpuid *) &mach->cpuid);
160037c5f6c8SDavid Hildenbrand 	mach->ibc = sclp.ibc;
1601c54f0d6aSDavid Hildenbrand 	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
1602981467c9SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
160317e89e13SSven Schnelle 	memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,
160417e89e13SSven Schnelle 	       sizeof(stfle_fac_list));
1605a8c39dd7SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host ibc:  0x%4.4x, host cpuid:  0x%16.16llx",
1606a8c39dd7SChristian Borntraeger 		 kvm->arch.model.ibc,
1607a8c39dd7SChristian Borntraeger 		 kvm->arch.model.cpuid);
1608a8c39dd7SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host facmask:  0x%16.16llx.%16.16llx.%16.16llx",
1609a8c39dd7SChristian Borntraeger 		 mach->fac_mask[0],
1610a8c39dd7SChristian Borntraeger 		 mach->fac_mask[1],
1611a8c39dd7SChristian Borntraeger 		 mach->fac_mask[2]);
1612a8c39dd7SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host faclist:  0x%16.16llx.%16.16llx.%16.16llx",
1613a8c39dd7SChristian Borntraeger 		 mach->fac_list[0],
1614a8c39dd7SChristian Borntraeger 		 mach->fac_list[1],
1615a8c39dd7SChristian Borntraeger 		 mach->fac_list[2]);
1616658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1617658b6edaSMichael Mueller 		ret = -EFAULT;
1618658b6edaSMichael Mueller 	kfree(mach);
1619658b6edaSMichael Mueller out:
1620658b6edaSMichael Mueller 	return ret;
1621658b6edaSMichael Mueller }
1622658b6edaSMichael Mueller 
/*
 * Report the currently configured guest CPU feature bitmap to user
 * space.
 */
162315c9705fSDavid Hildenbrand static int kvm_s390_get_processor_feat(struct kvm *kvm,
162415c9705fSDavid Hildenbrand 				       struct kvm_device_attr *attr)
162515c9705fSDavid Hildenbrand {
162615c9705fSDavid Hildenbrand 	struct kvm_s390_vm_cpu_feat data;
162715c9705fSDavid Hildenbrand 
1628da0f8e95SYury Norov 	bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
162915c9705fSDavid Hildenbrand 	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
163015c9705fSDavid Hildenbrand 		return -EFAULT;
16312f8311c9SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
16322f8311c9SChristian Borntraeger 			 data.feat[0],
16332f8311c9SChristian Borntraeger 			 data.feat[1],
16342f8311c9SChristian Borntraeger 			 data.feat[2]);
163515c9705fSDavid Hildenbrand 	return 0;
163615c9705fSDavid Hildenbrand }
163715c9705fSDavid Hildenbrand 
/*
 * Report the CPU features available on the host (the maximum a guest
 * could be given) to user space.
 */
163815c9705fSDavid Hildenbrand static int kvm_s390_get_machine_feat(struct kvm *kvm,
163915c9705fSDavid Hildenbrand 				     struct kvm_device_attr *attr)
164015c9705fSDavid Hildenbrand {
164115c9705fSDavid Hildenbrand 	struct kvm_s390_vm_cpu_feat data;
164215c9705fSDavid Hildenbrand 
1643da0f8e95SYury Norov 	bitmap_to_arr64(data.feat, kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
164415c9705fSDavid Hildenbrand 	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
164515c9705fSDavid Hildenbrand 		return -EFAULT;
16462f8311c9SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host feat:  0x%16.16llx.0x%16.16llx.0x%16.16llx",
16472f8311c9SChristian Borntraeger 			 data.feat[0],
16482f8311c9SChristian Borntraeger 			 data.feat[1],
16492f8311c9SChristian Borntraeger 			 data.feat[2]);
165015c9705fSDavid Hildenbrand 	return 0;
165115c9705fSDavid Hildenbrand }
165215c9705fSDavid Hildenbrand 
16530a763c78SDavid Hildenbrand static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
16540a763c78SDavid Hildenbrand 					  struct kvm_device_attr *attr)
16550a763c78SDavid Hildenbrand {
1656346fa2f8SChristian Borntraeger 	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
1657346fa2f8SChristian Borntraeger 	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
1658346fa2f8SChristian Borntraeger 		return -EFAULT;
1659346fa2f8SChristian Borntraeger 
166011ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
166111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
166211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
166311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
166411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
166511ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
166611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
166711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
166811ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
166911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
167011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
167111ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
167211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
167311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
167411ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KM     subfunc 0x%16.16lx.%16.16lx",
167511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
167611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
167711ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
167811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
167911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
168011ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
168111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
168211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
168311ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
168411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
168511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
168611ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
168711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
168811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
168911ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
169011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
169111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
169211ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
169311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
169411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
169511ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
169611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
169711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
169811ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
169911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
170011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
170111ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
170211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
170311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
170413209ad0SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
170513209ad0SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
170613209ad0SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1707173aec2dSChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1708173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1709173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1710173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1711173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
17124f45b90eSChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
17134f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
17144f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
17154f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
17164f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
171711ba5961SChristian Borntraeger 
1718346fa2f8SChristian Borntraeger 	return 0;
17190a763c78SDavid Hildenbrand }
17200a763c78SDavid Hildenbrand 
17210a763c78SDavid Hildenbrand static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
17220a763c78SDavid Hildenbrand 					struct kvm_device_attr *attr)
17230a763c78SDavid Hildenbrand {
17240a763c78SDavid Hildenbrand 	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
17250a763c78SDavid Hildenbrand 	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
17260a763c78SDavid Hildenbrand 		return -EFAULT;
172711ba5961SChristian Borntraeger 
172811ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
172911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
173011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
173111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
173211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
173311ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  PTFF   subfunc 0x%16.16lx.%16.16lx",
173411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
173511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
173611ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KMAC   subfunc 0x%16.16lx.%16.16lx",
173711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
173811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
173911ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KMC    subfunc 0x%16.16lx.%16.16lx",
174011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
174111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
174211ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KM     subfunc 0x%16.16lx.%16.16lx",
174311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
174411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
174511ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KIMD   subfunc 0x%16.16lx.%16.16lx",
174611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
174711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
174811ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KLMD   subfunc 0x%16.16lx.%16.16lx",
174911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
175011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
175111ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  PCKMO  subfunc 0x%16.16lx.%16.16lx",
175211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
175311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
175411ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KMCTR  subfunc 0x%16.16lx.%16.16lx",
175511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
175611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
175711ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KMF    subfunc 0x%16.16lx.%16.16lx",
175811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
175911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
176011ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KMO    subfunc 0x%16.16lx.%16.16lx",
176111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
176211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
176311ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  PCC    subfunc 0x%16.16lx.%16.16lx",
176411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
176511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
176611ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  PPNO   subfunc 0x%16.16lx.%16.16lx",
176711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
176811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
176911ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KMA    subfunc 0x%16.16lx.%16.16lx",
177011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
177111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
177213209ad0SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KDSA   subfunc 0x%16.16lx.%16.16lx",
177313209ad0SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
177413209ad0SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
1775173aec2dSChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1776173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
1777173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
1778173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
1779173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
17804f45b90eSChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
17814f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
17824f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
17834f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
17844f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
178511ba5961SChristian Borntraeger 
17860a763c78SDavid Hildenbrand 	return 0;
17870a763c78SDavid Hildenbrand }
1788346fa2f8SChristian Borntraeger 
1789658b6edaSMichael Mueller static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1790658b6edaSMichael Mueller {
1791658b6edaSMichael Mueller 	int ret = -ENXIO;
1792658b6edaSMichael Mueller 
1793658b6edaSMichael Mueller 	switch (attr->attr) {
1794658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
1795658b6edaSMichael Mueller 		ret = kvm_s390_get_processor(kvm, attr);
1796658b6edaSMichael Mueller 		break;
1797658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MACHINE:
1798658b6edaSMichael Mueller 		ret = kvm_s390_get_machine(kvm, attr);
1799658b6edaSMichael Mueller 		break;
180015c9705fSDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
180115c9705fSDavid Hildenbrand 		ret = kvm_s390_get_processor_feat(kvm, attr);
180215c9705fSDavid Hildenbrand 		break;
180315c9705fSDavid Hildenbrand 	case KVM_S390_VM_CPU_MACHINE_FEAT:
180415c9705fSDavid Hildenbrand 		ret = kvm_s390_get_machine_feat(kvm, attr);
180515c9705fSDavid Hildenbrand 		break;
18060a763c78SDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
18070a763c78SDavid Hildenbrand 		ret = kvm_s390_get_processor_subfunc(kvm, attr);
18080a763c78SDavid Hildenbrand 		break;
18090a763c78SDavid Hildenbrand 	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
18100a763c78SDavid Hildenbrand 		ret = kvm_s390_get_machine_subfunc(kvm, attr);
18110a763c78SDavid Hildenbrand 		break;
1812658b6edaSMichael Mueller 	}
1813658b6edaSMichael Mueller 	return ret;
1814658b6edaSMichael Mueller }
1815658b6edaSMichael Mueller 
/**
 * kvm_s390_update_topology_change_report - update CPU topology change report
 * @kvm: guest KVM description
 * @val: set or clear the MTCR bit
 *
 * Updates the Multiprocessor Topology-Change-Report bit to signal
 * the guest with a topology change.
 * This is only relevant if the topology facility is present.
 *
 * The SCA version, bsca or esca, doesn't matter as offset is the same.
 */
static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val)
{
	union sca_utility new, old;
	struct bsca_block *sca;

	/* sca_lock (read side) keeps the SCA block itself from being
	 * replaced while we poke at it. */
	read_lock(&kvm->arch.sca_lock);
	sca = kvm->arch.sca;
	/* Lock-free read-modify-write of the utility word: retry the
	 * cmpxchg until no concurrent writer changed it in between, so
	 * only the MTCR bit is altered and other bits are preserved. */
	do {
		old = READ_ONCE(sca->utility);
		new = old;
		new.mtcr = val;
	} while (cmpxchg(&sca->utility.val, old.val, new.val) != old.val);
	read_unlock(&kvm->arch.sca_lock);
}
184124fe0195SPierre Morel 
1842f5ecfee9SPierre Morel static int kvm_s390_set_topo_change_indication(struct kvm *kvm,
1843f5ecfee9SPierre Morel 					       struct kvm_device_attr *attr)
1844f5ecfee9SPierre Morel {
1845f5ecfee9SPierre Morel 	if (!test_kvm_facility(kvm, 11))
1846f5ecfee9SPierre Morel 		return -ENXIO;
1847f5ecfee9SPierre Morel 
1848f5ecfee9SPierre Morel 	kvm_s390_update_topology_change_report(kvm, !!attr->attr);
1849f5ecfee9SPierre Morel 	return 0;
1850f5ecfee9SPierre Morel }
1851f5ecfee9SPierre Morel 
1852f5ecfee9SPierre Morel static int kvm_s390_get_topo_change_indication(struct kvm *kvm,
1853f5ecfee9SPierre Morel 					       struct kvm_device_attr *attr)
1854f5ecfee9SPierre Morel {
1855f5ecfee9SPierre Morel 	u8 topo;
1856f5ecfee9SPierre Morel 
1857f5ecfee9SPierre Morel 	if (!test_kvm_facility(kvm, 11))
1858f5ecfee9SPierre Morel 		return -ENXIO;
1859f5ecfee9SPierre Morel 
1860f5ecfee9SPierre Morel 	read_lock(&kvm->arch.sca_lock);
1861f5ecfee9SPierre Morel 	topo = ((struct bsca_block *)kvm->arch.sca)->utility.mtcr;
1862f5ecfee9SPierre Morel 	read_unlock(&kvm->arch.sca_lock);
1863f5ecfee9SPierre Morel 
1864f5ecfee9SPierre Morel 	return put_user(topo, (u8 __user *)attr->addr);
1865f5ecfee9SPierre Morel }
1866f5ecfee9SPierre Morel 
1867f2061656SDominik Dingel static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1868f2061656SDominik Dingel {
1869f2061656SDominik Dingel 	int ret;
1870f2061656SDominik Dingel 
1871f2061656SDominik Dingel 	switch (attr->group) {
18724f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
18738c0a7ce6SDominik Dingel 		ret = kvm_s390_set_mem_control(kvm, attr);
18744f718eabSDominik Dingel 		break;
187572f25020SJason J. Herne 	case KVM_S390_VM_TOD:
187672f25020SJason J. Herne 		ret = kvm_s390_set_tod(kvm, attr);
187772f25020SJason J. Herne 		break;
1878658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
1879658b6edaSMichael Mueller 		ret = kvm_s390_set_cpu_model(kvm, attr);
1880658b6edaSMichael Mueller 		break;
1881a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO:
1882a374e892STony Krowiak 		ret = kvm_s390_vm_set_crypto(kvm, attr);
1883a374e892STony Krowiak 		break;
1884190df4a2SClaudio Imbrenda 	case KVM_S390_VM_MIGRATION:
1885190df4a2SClaudio Imbrenda 		ret = kvm_s390_vm_set_migration(kvm, attr);
1886190df4a2SClaudio Imbrenda 		break;
1887f5ecfee9SPierre Morel 	case KVM_S390_VM_CPU_TOPOLOGY:
1888f5ecfee9SPierre Morel 		ret = kvm_s390_set_topo_change_indication(kvm, attr);
1889f5ecfee9SPierre Morel 		break;
1890f2061656SDominik Dingel 	default:
1891f2061656SDominik Dingel 		ret = -ENXIO;
1892f2061656SDominik Dingel 		break;
1893f2061656SDominik Dingel 	}
1894f2061656SDominik Dingel 
1895f2061656SDominik Dingel 	return ret;
1896f2061656SDominik Dingel }
1897f2061656SDominik Dingel 
1898f2061656SDominik Dingel static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1899f2061656SDominik Dingel {
19008c0a7ce6SDominik Dingel 	int ret;
19018c0a7ce6SDominik Dingel 
19028c0a7ce6SDominik Dingel 	switch (attr->group) {
19038c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
19048c0a7ce6SDominik Dingel 		ret = kvm_s390_get_mem_control(kvm, attr);
19058c0a7ce6SDominik Dingel 		break;
190672f25020SJason J. Herne 	case KVM_S390_VM_TOD:
190772f25020SJason J. Herne 		ret = kvm_s390_get_tod(kvm, attr);
190872f25020SJason J. Herne 		break;
1909658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
1910658b6edaSMichael Mueller 		ret = kvm_s390_get_cpu_model(kvm, attr);
1911658b6edaSMichael Mueller 		break;
1912190df4a2SClaudio Imbrenda 	case KVM_S390_VM_MIGRATION:
1913190df4a2SClaudio Imbrenda 		ret = kvm_s390_vm_get_migration(kvm, attr);
1914190df4a2SClaudio Imbrenda 		break;
1915f5ecfee9SPierre Morel 	case KVM_S390_VM_CPU_TOPOLOGY:
1916f5ecfee9SPierre Morel 		ret = kvm_s390_get_topo_change_indication(kvm, attr);
1917f5ecfee9SPierre Morel 		break;
19188c0a7ce6SDominik Dingel 	default:
19198c0a7ce6SDominik Dingel 		ret = -ENXIO;
19208c0a7ce6SDominik Dingel 		break;
19218c0a7ce6SDominik Dingel 	}
19228c0a7ce6SDominik Dingel 
19238c0a7ce6SDominik Dingel 	return ret;
1924f2061656SDominik Dingel }
1925f2061656SDominik Dingel 
1926f2061656SDominik Dingel static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1927f2061656SDominik Dingel {
1928f2061656SDominik Dingel 	int ret;
1929f2061656SDominik Dingel 
1930f2061656SDominik Dingel 	switch (attr->group) {
19314f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
19324f718eabSDominik Dingel 		switch (attr->attr) {
19334f718eabSDominik Dingel 		case KVM_S390_VM_MEM_ENABLE_CMMA:
19344f718eabSDominik Dingel 		case KVM_S390_VM_MEM_CLR_CMMA:
1935f9cbd9b0SDavid Hildenbrand 			ret = sclp.has_cmma ? 0 : -ENXIO;
1936f9cbd9b0SDavid Hildenbrand 			break;
19378c0a7ce6SDominik Dingel 		case KVM_S390_VM_MEM_LIMIT_SIZE:
19384f718eabSDominik Dingel 			ret = 0;
19394f718eabSDominik Dingel 			break;
19404f718eabSDominik Dingel 		default:
19414f718eabSDominik Dingel 			ret = -ENXIO;
19424f718eabSDominik Dingel 			break;
19434f718eabSDominik Dingel 		}
19444f718eabSDominik Dingel 		break;
194572f25020SJason J. Herne 	case KVM_S390_VM_TOD:
194672f25020SJason J. Herne 		switch (attr->attr) {
194772f25020SJason J. Herne 		case KVM_S390_VM_TOD_LOW:
194872f25020SJason J. Herne 		case KVM_S390_VM_TOD_HIGH:
194972f25020SJason J. Herne 			ret = 0;
195072f25020SJason J. Herne 			break;
195172f25020SJason J. Herne 		default:
195272f25020SJason J. Herne 			ret = -ENXIO;
195372f25020SJason J. Herne 			break;
195472f25020SJason J. Herne 		}
195572f25020SJason J. Herne 		break;
1956658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
1957658b6edaSMichael Mueller 		switch (attr->attr) {
1958658b6edaSMichael Mueller 		case KVM_S390_VM_CPU_PROCESSOR:
1959658b6edaSMichael Mueller 		case KVM_S390_VM_CPU_MACHINE:
196015c9705fSDavid Hildenbrand 		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
196115c9705fSDavid Hildenbrand 		case KVM_S390_VM_CPU_MACHINE_FEAT:
19620a763c78SDavid Hildenbrand 		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1963346fa2f8SChristian Borntraeger 		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1964658b6edaSMichael Mueller 			ret = 0;
1965658b6edaSMichael Mueller 			break;
1966658b6edaSMichael Mueller 		default:
1967658b6edaSMichael Mueller 			ret = -ENXIO;
1968658b6edaSMichael Mueller 			break;
1969658b6edaSMichael Mueller 		}
1970658b6edaSMichael Mueller 		break;
1971a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO:
1972a374e892STony Krowiak 		switch (attr->attr) {
1973a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1974a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1975a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1976a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1977a374e892STony Krowiak 			ret = 0;
1978a374e892STony Krowiak 			break;
197937940fb0STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
198037940fb0STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
198137940fb0STony Krowiak 			ret = ap_instructions_available() ? 0 : -ENXIO;
198237940fb0STony Krowiak 			break;
1983a374e892STony Krowiak 		default:
1984a374e892STony Krowiak 			ret = -ENXIO;
1985a374e892STony Krowiak 			break;
1986a374e892STony Krowiak 		}
1987a374e892STony Krowiak 		break;
1988190df4a2SClaudio Imbrenda 	case KVM_S390_VM_MIGRATION:
1989190df4a2SClaudio Imbrenda 		ret = 0;
1990190df4a2SClaudio Imbrenda 		break;
1991f5ecfee9SPierre Morel 	case KVM_S390_VM_CPU_TOPOLOGY:
1992f5ecfee9SPierre Morel 		ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO;
1993f5ecfee9SPierre Morel 		break;
1994f2061656SDominik Dingel 	default:
1995f2061656SDominik Dingel 		ret = -ENXIO;
1996f2061656SDominik Dingel 		break;
1997f2061656SDominik Dingel 	}
1998f2061656SDominik Dingel 
1999f2061656SDominik Dingel 	return ret;
2000f2061656SDominik Dingel }
2001f2061656SDominik Dingel 
/*
 * Read the guest storage keys for args->count frames starting at
 * args->start_gfn and copy them to the user buffer at args->skeydata_addr.
 *
 * Returns 0 on success, KVM_S390_GET_SKEYS_NONE if the guest mm does not
 * use storage keys at all, or a negative error code.
 */
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;

	/* No flags are defined for this ioctl. */
	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_uses_skeys(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
	if (!keys)
		return -ENOMEM;

	/* mmap lock protects the gmap/page tables; SRCU pins the memslots
	 * for the gfn_to_hva() translations. Keep this lock order. */
	mmap_read_lock(current->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(current->mm);

	/* Only copy out after all keys were read successfully. */
	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}
204930ee2a98SJason J. Herne 
205030ee2a98SJason J. Herne static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
205130ee2a98SJason J. Herne {
205230ee2a98SJason J. Herne 	uint8_t *keys;
205330ee2a98SJason J. Herne 	uint64_t hva;
20544f899147SChristian Borntraeger 	int srcu_idx, i, r = 0;
2055bd096f64SJanosch Frank 	bool unlocked;
205630ee2a98SJason J. Herne 
205730ee2a98SJason J. Herne 	if (args->flags != 0)
205830ee2a98SJason J. Herne 		return -EINVAL;
205930ee2a98SJason J. Herne 
206030ee2a98SJason J. Herne 	/* Enforce sane limit on memory allocation */
206130ee2a98SJason J. Herne 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
206230ee2a98SJason J. Herne 		return -EINVAL;
206330ee2a98SJason J. Herne 
2064c4196218SChristian Borntraeger 	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
206530ee2a98SJason J. Herne 	if (!keys)
206630ee2a98SJason J. Herne 		return -ENOMEM;
206730ee2a98SJason J. Herne 
206830ee2a98SJason J. Herne 	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
206930ee2a98SJason J. Herne 			   sizeof(uint8_t) * args->count);
207030ee2a98SJason J. Herne 	if (r) {
207130ee2a98SJason J. Herne 		r = -EFAULT;
207230ee2a98SJason J. Herne 		goto out;
207330ee2a98SJason J. Herne 	}
207430ee2a98SJason J. Herne 
207530ee2a98SJason J. Herne 	/* Enable storage key handling for the guest */
207614d4a425SDominik Dingel 	r = s390_enable_skey();
207714d4a425SDominik Dingel 	if (r)
207814d4a425SDominik Dingel 		goto out;
207930ee2a98SJason J. Herne 
2080bd096f64SJanosch Frank 	i = 0;
2081d8ed45c5SMichel Lespinasse 	mmap_read_lock(current->mm);
20824f899147SChristian Borntraeger 	srcu_idx = srcu_read_lock(&kvm->srcu);
2083bd096f64SJanosch Frank         while (i < args->count) {
2084bd096f64SJanosch Frank 		unlocked = false;
208530ee2a98SJason J. Herne 		hva = gfn_to_hva(kvm, args->start_gfn + i);
208630ee2a98SJason J. Herne 		if (kvm_is_error_hva(hva)) {
208730ee2a98SJason J. Herne 			r = -EFAULT;
2088d3ed1ceeSMartin Schwidefsky 			break;
208930ee2a98SJason J. Herne 		}
209030ee2a98SJason J. Herne 
209130ee2a98SJason J. Herne 		/* Lowest order bit is reserved */
209230ee2a98SJason J. Herne 		if (keys[i] & 0x01) {
209330ee2a98SJason J. Herne 			r = -EINVAL;
2094d3ed1ceeSMartin Schwidefsky 			break;
209530ee2a98SJason J. Herne 		}
209630ee2a98SJason J. Herne 
2097fe69eabfSDavid Hildenbrand 		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
2098bd096f64SJanosch Frank 		if (r) {
209964019a2eSPeter Xu 			r = fixup_user_fault(current->mm, hva,
2100bd096f64SJanosch Frank 					     FAULT_FLAG_WRITE, &unlocked);
210130ee2a98SJason J. Herne 			if (r)
2102d3ed1ceeSMartin Schwidefsky 				break;
210330ee2a98SJason J. Herne 		}
2104bd096f64SJanosch Frank 		if (!r)
2105bd096f64SJanosch Frank 			i++;
2106bd096f64SJanosch Frank 	}
21074f899147SChristian Borntraeger 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2108d8ed45c5SMichel Lespinasse 	mmap_read_unlock(current->mm);
210930ee2a98SJason J. Herne out:
211030ee2a98SJason J. Herne 	kvfree(keys);
211130ee2a98SJason J. Herne 	return r;
211230ee2a98SJason J. Herne }
211330ee2a98SJason J. Herne 
21144036e387SClaudio Imbrenda /*
21154036e387SClaudio Imbrenda  * Base address and length must be sent at the start of each block, therefore
21164036e387SClaudio Imbrenda  * it's cheaper to send some clean data, as long as it's less than the size of
21174036e387SClaudio Imbrenda  * two longs.
21184036e387SClaudio Imbrenda  */
21194036e387SClaudio Imbrenda #define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
21204036e387SClaudio Imbrenda /* for consistency */
21214036e387SClaudio Imbrenda #define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
21224036e387SClaudio Imbrenda 
2123afdad616SClaudio Imbrenda static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2124afdad616SClaudio Imbrenda 			      u8 *res, unsigned long bufsize)
2125afdad616SClaudio Imbrenda {
2126afdad616SClaudio Imbrenda 	unsigned long pgstev, hva, cur_gfn = args->start_gfn;
2127afdad616SClaudio Imbrenda 
2128afdad616SClaudio Imbrenda 	args->count = 0;
2129afdad616SClaudio Imbrenda 	while (args->count < bufsize) {
2130afdad616SClaudio Imbrenda 		hva = gfn_to_hva(kvm, cur_gfn);
2131afdad616SClaudio Imbrenda 		/*
2132afdad616SClaudio Imbrenda 		 * We return an error if the first value was invalid, but we
2133afdad616SClaudio Imbrenda 		 * return successfully if at least one value was copied.
2134afdad616SClaudio Imbrenda 		 */
2135afdad616SClaudio Imbrenda 		if (kvm_is_error_hva(hva))
2136afdad616SClaudio Imbrenda 			return args->count ? 0 : -EFAULT;
2137afdad616SClaudio Imbrenda 		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2138afdad616SClaudio Imbrenda 			pgstev = 0;
2139afdad616SClaudio Imbrenda 		res[args->count++] = (pgstev >> 24) & 0x43;
2140afdad616SClaudio Imbrenda 		cur_gfn++;
2141afdad616SClaudio Imbrenda 	}
2142afdad616SClaudio Imbrenda 
2143afdad616SClaudio Imbrenda 	return 0;
2144afdad616SClaudio Imbrenda }
2145afdad616SClaudio Imbrenda 
/*
 * Memslot lookup that tolerates holes: NOTE(review) the approx=true lookup
 * presumably returns the memslot nearest to @gfn when @gfn itself is not
 * covered by any slot — confirm against ____gfn_to_memslot().
 */
static struct kvm_memory_slot *gfn_to_memslot_approx(struct kvm_memslots *slots,
						     gfn_t gfn)
{
	return ____gfn_to_memslot(slots, gfn, true);
}
2151c928bfc2SMaciej S. Szmigiero 
/*
 * Find the guest frame number of the next CMMA-dirty frame at or after
 * @cur_gfn, scanning the per-memslot "second" dirty bitmaps via the
 * memslot gfn tree and wrapping around to the lowest memslot when the end
 * is reached. Assumes @slots is not empty (the caller checks).
 */
static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
					      unsigned long cur_gfn)
{
	struct kvm_memory_slot *ms = gfn_to_memslot_approx(slots, cur_gfn);
	unsigned long ofs = cur_gfn - ms->base_gfn;
	struct rb_node *mnode = &ms->gfn_node[slots->node_idx];

	/* cur_gfn may lie past the approx slot; start at the next one. */
	if (ms->base_gfn + ms->npages <= cur_gfn) {
		mnode = rb_next(mnode);
		/* If we are above the highest slot, wrap around */
		if (!mnode)
			mnode = rb_first(&slots->gfn_tree);

		ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
		ofs = 0;
	}
	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
	/* No dirty bit in this slot: keep walking the following slots. */
	while (ofs >= ms->npages && (mnode = rb_next(mnode))) {
		ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
		ofs = find_first_bit(kvm_second_dirty_bitmap(ms), ms->npages);
	}
	return ms->base_gfn + ofs;
}
2175afdad616SClaudio Imbrenda 
/*
 * Collect CMMA values of dirty frames for migration: starting at
 * args->start_gfn, find consecutive dirty frames, clear their dirty bits,
 * and store their CMMA values (PGSTE bits 24..31 masked with 0x43) in @res.
 * args->start_gfn is updated to the first dirty frame actually found and
 * args->count to the number of values stored. Stops early when the next
 * dirty frame is more than KVM_S390_MAX_BIT_DISTANCE away, when guest
 * memory or the buffer ends, or when an unmapped frame is hit.
 */
static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			     u8 *res, unsigned long bufsize)
{
	unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *ms;

	if (unlikely(kvm_memslots_empty(slots)))
		return 0;

	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
	ms = gfn_to_memslot(kvm, cur_gfn);
	args->count = 0;
	args->start_gfn = cur_gfn;
	if (!ms)
		return 0;
	/* Track the dirty frame after the current one to decide when to stop. */
	next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
	mem_end = kvm_s390_get_gfn_end(slots);

	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		if (kvm_is_error_hva(hva))
			return 0;
		/* Decrement only if we actually flipped the bit to 0 */
		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_dec(&kvm->arch.cmma_dirty_pages);
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		/* Save the value */
		res[args->count++] = (pgstev >> 24) & 0x43;
		/* If the next bit is too far away, stop. */
		if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
			return 0;
		/* If we reached the previous "next", find the next one */
		if (cur_gfn == next_gfn)
			next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
		/* Reached the end of memory or of the buffer, stop */
		if ((next_gfn >= mem_end) ||
		    (next_gfn - args->start_gfn >= bufsize))
			return 0;
		cur_gfn++;
		/* Reached the end of the current memslot, take the next one. */
		if (cur_gfn - ms->base_gfn >= ms->npages) {
			ms = gfn_to_memslot(kvm, cur_gfn);
			if (!ms)
				return 0;
		}
	}
	return 0;
}
2226afdad616SClaudio Imbrenda 
/*
 * This function searches for the next page with dirty CMMA attributes, and
 * saves the attributes in the buffer up to either the end of the buffer or
 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
 * no trailing clean bytes are saved.
 * In case no dirty bits were found, or if CMMA was not enabled or used, the
 * output buffer will indicate 0 as length.
 *
 * With KVM_S390_CMMA_PEEK the values are read without consulting or
 * modifying the dirty tracking (kvm_s390_peek_cmma); otherwise migration
 * mode must be active and only dirty pages are reported
 * (kvm_s390_get_cmma).
 */
static int kvm_s390_get_cmma_bits(struct kvm *kvm,
				  struct kvm_s390_cmma_log *args)
{
	unsigned long bufsize;
	int srcu_idx, peek, ret;
	u8 *values;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* Invalid/unsupported flags were specified */
	if (args->flags & ~KVM_S390_CMMA_PEEK)
		return -EINVAL;
	/* Migration mode query, and we are not doing a migration */
	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
	if (!peek && !kvm->arch.migration_mode)
		return -EINVAL;
	/* CMMA is disabled or was not used, or the buffer has length zero */
	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
	if (!bufsize || !kvm->mm->context.uses_cmm) {
		memset(args, 0, sizeof(*args));
		return 0;
	}
	/* We are not peeking, and there are no dirty pages */
	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
		memset(args, 0, sizeof(*args));
		return 0;
	}

	/* Gather into a kernel buffer first; copy to user space unlocked. */
	values = vmalloc(bufsize);
	if (!values)
		return -ENOMEM;

	/* Both helpers read PGSTEs and walk memslots: take mmap lock + srcu. */
	mmap_read_lock(kvm->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	if (peek)
		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
	else
		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(kvm->mm);

	/* Report how many dirty pages are still pending, if migrating. */
	if (kvm->arch.migration_mode)
		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
	else
		args->remaining = 0;

	if (copy_to_user((void __user *)args->values, values, args->count))
		ret = -EFAULT;

	vfree(values);
	return ret;
}
22874036e387SClaudio Imbrenda 
22884036e387SClaudio Imbrenda /*
22894036e387SClaudio Imbrenda  * This function sets the CMMA attributes for the given pages. If the input
22904036e387SClaudio Imbrenda  * buffer has zero length, no action is taken, otherwise the attributes are
2291c9f0a2b8SJanosch Frank  * set and the mm->context.uses_cmm flag is set.
22924036e387SClaudio Imbrenda  */
22934036e387SClaudio Imbrenda static int kvm_s390_set_cmma_bits(struct kvm *kvm,
22944036e387SClaudio Imbrenda 				  const struct kvm_s390_cmma_log *args)
22954036e387SClaudio Imbrenda {
22964036e387SClaudio Imbrenda 	unsigned long hva, mask, pgstev, i;
22974036e387SClaudio Imbrenda 	uint8_t *bits;
22984036e387SClaudio Imbrenda 	int srcu_idx, r = 0;
22994036e387SClaudio Imbrenda 
23004036e387SClaudio Imbrenda 	mask = args->mask;
23014036e387SClaudio Imbrenda 
23024036e387SClaudio Imbrenda 	if (!kvm->arch.use_cmma)
23034036e387SClaudio Imbrenda 		return -ENXIO;
23044036e387SClaudio Imbrenda 	/* invalid/unsupported flags */
23054036e387SClaudio Imbrenda 	if (args->flags != 0)
23064036e387SClaudio Imbrenda 		return -EINVAL;
23074036e387SClaudio Imbrenda 	/* Enforce sane limit on memory allocation */
23084036e387SClaudio Imbrenda 	if (args->count > KVM_S390_CMMA_SIZE_MAX)
23094036e387SClaudio Imbrenda 		return -EINVAL;
23104036e387SClaudio Imbrenda 	/* Nothing to do */
23114036e387SClaudio Imbrenda 	if (args->count == 0)
23124036e387SClaudio Imbrenda 		return 0;
23134036e387SClaudio Imbrenda 
231442bc47b3SKees Cook 	bits = vmalloc(array_size(sizeof(*bits), args->count));
23154036e387SClaudio Imbrenda 	if (!bits)
23164036e387SClaudio Imbrenda 		return -ENOMEM;
23174036e387SClaudio Imbrenda 
23184036e387SClaudio Imbrenda 	r = copy_from_user(bits, (void __user *)args->values, args->count);
23194036e387SClaudio Imbrenda 	if (r) {
23204036e387SClaudio Imbrenda 		r = -EFAULT;
23214036e387SClaudio Imbrenda 		goto out;
23224036e387SClaudio Imbrenda 	}
23234036e387SClaudio Imbrenda 
2324d8ed45c5SMichel Lespinasse 	mmap_read_lock(kvm->mm);
23254036e387SClaudio Imbrenda 	srcu_idx = srcu_read_lock(&kvm->srcu);
23264036e387SClaudio Imbrenda 	for (i = 0; i < args->count; i++) {
23274036e387SClaudio Imbrenda 		hva = gfn_to_hva(kvm, args->start_gfn + i);
23284036e387SClaudio Imbrenda 		if (kvm_is_error_hva(hva)) {
23294036e387SClaudio Imbrenda 			r = -EFAULT;
23304036e387SClaudio Imbrenda 			break;
23314036e387SClaudio Imbrenda 		}
23324036e387SClaudio Imbrenda 
23334036e387SClaudio Imbrenda 		pgstev = bits[i];
23344036e387SClaudio Imbrenda 		pgstev = pgstev << 24;
23351bab1c02SClaudio Imbrenda 		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
23364036e387SClaudio Imbrenda 		set_pgste_bits(kvm->mm, hva, mask, pgstev);
23374036e387SClaudio Imbrenda 	}
23384036e387SClaudio Imbrenda 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2339d8ed45c5SMichel Lespinasse 	mmap_read_unlock(kvm->mm);
23404036e387SClaudio Imbrenda 
2341c9f0a2b8SJanosch Frank 	if (!kvm->mm->context.uses_cmm) {
2342d8ed45c5SMichel Lespinasse 		mmap_write_lock(kvm->mm);
2343c9f0a2b8SJanosch Frank 		kvm->mm->context.uses_cmm = 1;
2344d8ed45c5SMichel Lespinasse 		mmap_write_unlock(kvm->mm);
23454036e387SClaudio Imbrenda 	}
23464036e387SClaudio Imbrenda out:
23474036e387SClaudio Imbrenda 	vfree(bits);
23484036e387SClaudio Imbrenda 	return r;
23494036e387SClaudio Imbrenda }
23504036e387SClaudio Imbrenda 
2351be48d86fSClaudio Imbrenda /**
2352be48d86fSClaudio Imbrenda  * kvm_s390_cpus_from_pv - Convert all protected vCPUs in a protected VM to
2353be48d86fSClaudio Imbrenda  * non protected.
2354be48d86fSClaudio Imbrenda  * @kvm: the VM whose protected vCPUs are to be converted
2355be48d86fSClaudio Imbrenda  * @rc: return value for the RC field of the UVC (in case of error)
2356be48d86fSClaudio Imbrenda  * @rrc: return value for the RRC field of the UVC (in case of error)
2357be48d86fSClaudio Imbrenda  *
2358be48d86fSClaudio Imbrenda  * Does not stop in case of error, tries to convert as many
2359be48d86fSClaudio Imbrenda  * CPUs as possible. In case of error, the RC and RRC of the last error are
2360be48d86fSClaudio Imbrenda  * returned.
2361be48d86fSClaudio Imbrenda  *
2362be48d86fSClaudio Imbrenda  * Return: 0 in case of success, otherwise -EIO
2363be48d86fSClaudio Imbrenda  */
2364be48d86fSClaudio Imbrenda int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
236529b40f10SJanosch Frank {
236629b40f10SJanosch Frank 	struct kvm_vcpu *vcpu;
236746808a4cSMarc Zyngier 	unsigned long i;
2368be48d86fSClaudio Imbrenda 	u16 _rc, _rrc;
2369be48d86fSClaudio Imbrenda 	int ret = 0;
237029b40f10SJanosch Frank 
237129b40f10SJanosch Frank 	/*
237229b40f10SJanosch Frank 	 * We ignore failures and try to destroy as many CPUs as possible.
237329b40f10SJanosch Frank 	 * At the same time we must not free the assigned resources when
237429b40f10SJanosch Frank 	 * this fails, as the ultravisor has still access to that memory.
237529b40f10SJanosch Frank 	 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
237629b40f10SJanosch Frank 	 * behind.
237729b40f10SJanosch Frank 	 * We want to return the first failure rc and rrc, though.
237829b40f10SJanosch Frank 	 */
237929b40f10SJanosch Frank 	kvm_for_each_vcpu(i, vcpu, kvm) {
238029b40f10SJanosch Frank 		mutex_lock(&vcpu->mutex);
2381be48d86fSClaudio Imbrenda 		if (kvm_s390_pv_destroy_cpu(vcpu, &_rc, &_rrc) && !ret) {
2382be48d86fSClaudio Imbrenda 			*rc = _rc;
2383be48d86fSClaudio Imbrenda 			*rrc = _rrc;
238429b40f10SJanosch Frank 			ret = -EIO;
238529b40f10SJanosch Frank 		}
238629b40f10SJanosch Frank 		mutex_unlock(&vcpu->mutex);
238729b40f10SJanosch Frank 	}
2388ee6a569dSMichael Mueller 	/* Ensure that we re-enable gisa if the non-PV guest used it but the PV guest did not. */
2389ee6a569dSMichael Mueller 	if (use_gisa)
2390ee6a569dSMichael Mueller 		kvm_s390_gisa_enable(kvm);
239129b40f10SJanosch Frank 	return ret;
239229b40f10SJanosch Frank }
239329b40f10SJanosch Frank 
2394be48d86fSClaudio Imbrenda /**
2395be48d86fSClaudio Imbrenda  * kvm_s390_cpus_to_pv - Convert all non-protected vCPUs in a protected VM
2396be48d86fSClaudio Imbrenda  * to protected.
2397be48d86fSClaudio Imbrenda  * @kvm: the VM whose protected vCPUs are to be converted
2398be48d86fSClaudio Imbrenda  * @rc: return value for the RC field of the UVC (in case of error)
2399be48d86fSClaudio Imbrenda  * @rrc: return value for the RRC field of the UVC (in case of error)
2400be48d86fSClaudio Imbrenda  *
2401be48d86fSClaudio Imbrenda  * Tries to undo the conversion in case of error.
2402be48d86fSClaudio Imbrenda  *
2403be48d86fSClaudio Imbrenda  * Return: 0 in case of success, otherwise -EIO
2404be48d86fSClaudio Imbrenda  */
240529b40f10SJanosch Frank static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
240629b40f10SJanosch Frank {
240746808a4cSMarc Zyngier 	unsigned long i;
240846808a4cSMarc Zyngier 	int r = 0;
240929b40f10SJanosch Frank 	u16 dummy;
241029b40f10SJanosch Frank 
241129b40f10SJanosch Frank 	struct kvm_vcpu *vcpu;
241229b40f10SJanosch Frank 
2413ee6a569dSMichael Mueller 	/* Disable the GISA if the ultravisor does not support AIV. */
2414ee6a569dSMichael Mueller 	if (!test_bit_inv(BIT_UV_FEAT_AIV, &uv_info.uv_feature_indications))
2415ee6a569dSMichael Mueller 		kvm_s390_gisa_disable(kvm);
2416ee6a569dSMichael Mueller 
241729b40f10SJanosch Frank 	kvm_for_each_vcpu(i, vcpu, kvm) {
241829b40f10SJanosch Frank 		mutex_lock(&vcpu->mutex);
241929b40f10SJanosch Frank 		r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
242029b40f10SJanosch Frank 		mutex_unlock(&vcpu->mutex);
242129b40f10SJanosch Frank 		if (r)
242229b40f10SJanosch Frank 			break;
242329b40f10SJanosch Frank 	}
242429b40f10SJanosch Frank 	if (r)
242529b40f10SJanosch Frank 		kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
242629b40f10SJanosch Frank 	return r;
242729b40f10SJanosch Frank }
242829b40f10SJanosch Frank 
242935d02493SJanosch Frank /*
243035d02493SJanosch Frank  * Here we provide user space with a direct interface to query UV
243135d02493SJanosch Frank  * related data like UV maxima and available features as well as
243235d02493SJanosch Frank  * feature specific data.
243335d02493SJanosch Frank  *
243435d02493SJanosch Frank  * To facilitate future extension of the data structures we'll try to
243535d02493SJanosch Frank  * write data up to the maximum requested length.
243635d02493SJanosch Frank  */
243735d02493SJanosch Frank static ssize_t kvm_s390_handle_pv_info(struct kvm_s390_pv_info *info)
243835d02493SJanosch Frank {
243935d02493SJanosch Frank 	ssize_t len_min;
244035d02493SJanosch Frank 
244135d02493SJanosch Frank 	switch (info->header.id) {
244235d02493SJanosch Frank 	case KVM_PV_INFO_VM: {
244335d02493SJanosch Frank 		len_min =  sizeof(info->header) + sizeof(info->vm);
244435d02493SJanosch Frank 
244535d02493SJanosch Frank 		if (info->header.len_max < len_min)
244635d02493SJanosch Frank 			return -EINVAL;
244735d02493SJanosch Frank 
244835d02493SJanosch Frank 		memcpy(info->vm.inst_calls_list,
244935d02493SJanosch Frank 		       uv_info.inst_calls_list,
245035d02493SJanosch Frank 		       sizeof(uv_info.inst_calls_list));
245135d02493SJanosch Frank 
245235d02493SJanosch Frank 		/* It's max cpuid not max cpus, so it's off by one */
245335d02493SJanosch Frank 		info->vm.max_cpus = uv_info.max_guest_cpu_id + 1;
245435d02493SJanosch Frank 		info->vm.max_guests = uv_info.max_num_sec_conf;
245535d02493SJanosch Frank 		info->vm.max_guest_addr = uv_info.max_sec_stor_addr;
245635d02493SJanosch Frank 		info->vm.feature_indication = uv_info.uv_feature_indications;
245735d02493SJanosch Frank 
245835d02493SJanosch Frank 		return len_min;
245935d02493SJanosch Frank 	}
2460fe9a93e0SJanosch Frank 	case KVM_PV_INFO_DUMP: {
2461fe9a93e0SJanosch Frank 		len_min =  sizeof(info->header) + sizeof(info->dump);
2462fe9a93e0SJanosch Frank 
2463fe9a93e0SJanosch Frank 		if (info->header.len_max < len_min)
2464fe9a93e0SJanosch Frank 			return -EINVAL;
2465fe9a93e0SJanosch Frank 
2466fe9a93e0SJanosch Frank 		info->dump.dump_cpu_buffer_len = uv_info.guest_cpu_stor_len;
2467fe9a93e0SJanosch Frank 		info->dump.dump_config_mem_buffer_per_1m = uv_info.conf_dump_storage_state_len;
2468fe9a93e0SJanosch Frank 		info->dump.dump_config_finalize_len = uv_info.conf_dump_finalize_len;
2469fe9a93e0SJanosch Frank 		return len_min;
2470fe9a93e0SJanosch Frank 	}
247135d02493SJanosch Frank 	default:
247235d02493SJanosch Frank 		return -EINVAL;
247335d02493SJanosch Frank 	}
247435d02493SJanosch Frank }
247535d02493SJanosch Frank 
/**
 * kvm_s390_pv_dmp - Handle the KVM_PV_DUMP subcommands for a protected VM
 * @kvm: the VM whose protected state is to be dumped
 * @cmd: the PV command; cmd->rc and cmd->rrc receive the UVC result codes
 * @dmp: the dump request (subcommand, buffer address/length, gaddr)
 *
 * KVM_PV_DUMP_INIT starts a dump session (and blocks SIE entry for all
 * vCPUs until the dump is completed), KVM_PV_DUMP_CONFIG_STOR_STATE dumps
 * guest storage state into the user buffer, and KVM_PV_DUMP_COMPLETE
 * finalizes the dump. The stor-state and complete subcommands require an
 * active dump session.
 *
 * Return: 0 on success, -EINVAL on an invalid state or buffer size,
 * -ENOTTY on an unknown subcommand, or the error from the dump helpers.
 */
static int kvm_s390_pv_dmp(struct kvm *kvm, struct kvm_pv_cmd *cmd,
			   struct kvm_s390_pv_dmp dmp)
{
	int r = -EINVAL;
	void __user *result_buff = (void __user *)dmp.buff_addr;

	switch (dmp.subcmd) {
	case KVM_PV_DUMP_INIT: {
		/* Only one dump session may be active at a time. */
		if (kvm->arch.pv.dumping)
			break;

		/*
		 * Block SIE entry as concurrent dump UVCs could lead
		 * to validities.
		 */
		kvm_s390_vcpu_block_all(kvm);

		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
				  UVC_CMD_DUMP_INIT, &cmd->rc, &cmd->rrc);
		KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP INIT: rc %x rrc %x",
			     cmd->rc, cmd->rrc);
		if (!r) {
			kvm->arch.pv.dumping = true;
		} else {
			/* Init failed: let the vCPUs run again. */
			kvm_s390_vcpu_unblock_all(kvm);
			r = -EINVAL;
		}
		break;
	}
	case KVM_PV_DUMP_CONFIG_STOR_STATE: {
		if (!kvm->arch.pv.dumping)
			break;

		/*
		 * gaddr is an output parameter since we might stop
		 * early. As dmp will be copied back in our caller, we
		 * don't need to do it ourselves.
		 */
		r = kvm_s390_pv_dump_stor_state(kvm, result_buff, &dmp.gaddr, dmp.buff_len,
						&cmd->rc, &cmd->rrc);
		break;
	}
	case KVM_PV_DUMP_COMPLETE: {
		if (!kvm->arch.pv.dumping)
			break;

		/* The completion data must fit into the user buffer. */
		r = -EINVAL;
		if (dmp.buff_len < uv_info.conf_dump_finalize_len)
			break;

		r = kvm_s390_pv_dump_complete(kvm, result_buff,
					      &cmd->rc, &cmd->rrc);
		break;
	}
	default:
		r = -ENOTTY;
		break;
	}

	return r;
}
25370460eb35SJanosch Frank 
/*
 * kvm_s390_handle_pv() - dispatch the KVM_S390_PV_COMMAND ioctl.
 * @kvm: the VM the command operates on
 * @cmd: the protected-virtualization command; cmd->rc/cmd->rrc are filled
 *       with the ultravisor result codes where applicable
 *
 * All commands run under kvm->lock, except KVM_PV_ASYNC_CLEANUP_PERFORM,
 * which must run without it (the callee asserts this) so that the
 * potentially long-running teardown does not block the VM.
 *
 * Return: 0 on success, a negative error code otherwise (-ENOTTY for an
 * unknown command).
 */
static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
{
	/* The async cleanup worker path is the only one run unlocked. */
	const bool need_lock = (cmd->cmd != KVM_PV_ASYNC_CLEANUP_PERFORM);
	void __user *argp = (void __user *)cmd->data;
	int r = 0;
	u16 dummy;

	if (need_lock)
		mutex_lock(&kvm->lock);

	switch (cmd->cmd) {
	case KVM_PV_ENABLE: {
		r = -EINVAL;
		if (kvm_s390_pv_is_protected(kvm))
			break;

		/*
		 *  FMT 4 SIE needs esca. As we never switch back to bsca from
		 *  esca, we need no cleanup in the error cases below
		 */
		r = sca_switch_to_extended(kvm);
		if (r)
			break;

		mmap_write_lock(current->mm);
		r = gmap_mark_unmergeable();
		mmap_write_unlock(current->mm);
		if (r)
			break;

		r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
		if (r)
			break;

		r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
		if (r)
			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);

		/* we need to block service interrupts from now on */
		set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
		break;
	}
	case KVM_PV_ASYNC_CLEANUP_PREPARE:
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm) || !async_destroy)
			break;

		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
		/*
		 * If a CPU could not be destroyed, destroy VM will also fail.
		 * There is no point in trying to destroy it. Instead return
		 * the rc and rrc from the first CPU that failed destroying.
		 */
		if (r)
			break;
		r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc);

		/* no need to block service interrupts any more */
		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
		break;
	case KVM_PV_ASYNC_CLEANUP_PERFORM:
		r = -EINVAL;
		if (!async_destroy)
			break;
		/* kvm->lock must not be held; this is asserted inside the function. */
		r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc);
		break;
	case KVM_PV_DISABLE: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
		/*
		 * If a CPU could not be destroyed, destroy VM will also fail.
		 * There is no point in trying to destroy it. Instead return
		 * the rc and rrc from the first CPU that failed destroying.
		 */
		if (r)
			break;
		r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc);

		/* no need to block service interrupts any more */
		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
		break;
	}
	case KVM_PV_SET_SEC_PARMS: {
		struct kvm_s390_pv_sec_parm parms = {};
		void *hdr;

		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = -EFAULT;
		if (copy_from_user(&parms, argp, sizeof(parms)))
			break;

		/* Currently restricted to 8KB */
		r = -EINVAL;
		if (parms.length > PAGE_SIZE * 2)
			break;

		r = -ENOMEM;
		hdr = vmalloc(parms.length);
		if (!hdr)
			break;

		r = -EFAULT;
		if (!copy_from_user(hdr, (void __user *)parms.origin,
				    parms.length))
			r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
						      &cmd->rc, &cmd->rrc);

		vfree(hdr);
		break;
	}
	case KVM_PV_UNPACK: {
		struct kvm_s390_pv_unp unp = {};

		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
			break;

		r = -EFAULT;
		if (copy_from_user(&unp, argp, sizeof(unp)))
			break;

		r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
				       &cmd->rc, &cmd->rrc);
		break;
	}
	case KVM_PV_VERIFY: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
				  UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
		KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
			     cmd->rrc);
		break;
	}
	case KVM_PV_PREP_RESET: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
				  UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
		KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
			     cmd->rc, cmd->rrc);
		break;
	}
	case KVM_PV_UNSHARE_ALL: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
				  UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
		KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
			     cmd->rc, cmd->rrc);
		break;
	}
	case KVM_PV_INFO: {
		struct kvm_s390_pv_info info = {};
		ssize_t data_len;

		/*
		 * No need to check the VM protection here.
		 *
		 * Maybe user space wants to query some of the data
		 * when the VM is still unprotected. If we see the
		 * need to fence a new data command we can still
		 * return an error in the info handler.
		 */

		r = -EFAULT;
		if (copy_from_user(&info, argp, sizeof(info.header)))
			break;

		r = -EINVAL;
		if (info.header.len_max < sizeof(info.header))
			break;

		data_len = kvm_s390_handle_pv_info(&info);
		if (data_len < 0) {
			r = data_len;
			break;
		}
		/*
		 * If a data command struct is extended (multiple
		 * times) this can be used to determine how much of it
		 * is valid.
		 */
		info.header.len_written = data_len;

		r = -EFAULT;
		if (copy_to_user(argp, &info, data_len))
			break;

		r = 0;
		break;
	}
	case KVM_PV_DUMP: {
		struct kvm_s390_pv_dmp dmp;

		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = -EFAULT;
		if (copy_from_user(&dmp, argp, sizeof(dmp)))
			break;

		r = kvm_s390_pv_dmp(kvm, cmd, dmp);
		if (r)
			break;

		/* Copy back output fields (e.g. dmp.gaddr) to user space. */
		if (copy_to_user(argp, &dmp, sizeof(dmp))) {
			r = -EFAULT;
			break;
		}

		break;
	}
	default:
		r = -ENOTTY;
	}
	if (need_lock)
		mutex_unlock(&kvm->lock);

	return r;
}
277329b40f10SJanosch Frank 
2774e9e9feebSJanis Schoetterl-Glausch static bool access_key_invalid(u8 access_key)
2775e9e9feebSJanis Schoetterl-Glausch {
2776e9e9feebSJanis Schoetterl-Glausch 	return access_key > 0xf;
2777e9e9feebSJanis Schoetterl-Glausch }
2778e9e9feebSJanis Schoetterl-Glausch 
/*
 * kvm_s390_vm_mem_op() - handle the vm (as opposed to vcpu) variant of
 * KVM_S390_MEM_OP: read/write/check guest absolute memory.
 *
 * @kvm: the guest to access
 * @mop: the memop descriptor copied in from userspace; @mop->key may be
 *	 zeroed here as a side effect when skey protection is not requested
 *
 * Returns 0 on success, a positive PGM_* program interruption code if the
 * guest access faults, or a negative error code for invalid requests /
 * userspace copy failures.
 */
static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	u64 supported_flags;
	void *tmpbuf = NULL;
	int r, srcu_idx;

	/* Only skey protection and check-only are valid for the vm memop. */
	supported_flags = KVM_S390_MEMOP_F_SKEY_PROTECTION
			  | KVM_S390_MEMOP_F_CHECK_ONLY;
	if (mop->flags & ~supported_flags || !mop->size)
		return -EINVAL;
	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;
	/*
	 * This is technically a heuristic only, if the kvm->lock is not
	 * taken, it is not guaranteed that the vm is/remains non-protected.
	 * This is ok from a kernel perspective, wrongdoing is detected
	 * on the access, -EFAULT is returned and the vm may crash the
	 * next time it accesses the memory in question.
	 * There is no sane usecase to do switching and a memop on two
	 * different CPUs at the same time.
	 */
	if (kvm_s390_pv_get_handle(kvm))
		return -EINVAL;
	if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) {
		if (access_key_invalid(mop->key))
			return -EINVAL;
	} else {
		/* No skey protection requested: normalize the key to 0. */
		mop->key = 0;
	}
	/* A bounce buffer is only needed if data is actually transferred. */
	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	/* Hold the srcu lock so the memslots stay stable for the access. */
	srcu_idx = srcu_read_lock(&kvm->srcu);

	if (kvm_is_error_gpa(kvm, mop->gaddr)) {
		r = PGM_ADDRESSING;
		goto out_unlock;
	}

	switch (mop->op) {
	case KVM_S390_MEMOP_ABSOLUTE_READ: {
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gpa_range(kvm, mop->gaddr, mop->size, GACC_FETCH, mop->key);
		} else {
			/* Fetch guest memory, then copy the result out. */
			r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
						      mop->size, GACC_FETCH, mop->key);
			if (r == 0) {
				if (copy_to_user(uaddr, tmpbuf, mop->size))
					r = -EFAULT;
			}
		}
		break;
	}
	case KVM_S390_MEMOP_ABSOLUTE_WRITE: {
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gpa_range(kvm, mop->gaddr, mop->size, GACC_STORE, mop->key);
		} else {
			/* Copy the data in first, then store it to the guest. */
			if (copy_from_user(tmpbuf, uaddr, mop->size)) {
				r = -EFAULT;
				break;
			}
			r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
						      mop->size, GACC_STORE, mop->key);
		}
		break;
	}
	default:
		r = -EINVAL;
	}

out_unlock:
	srcu_read_unlock(&kvm->srcu, srcu_idx);

	vfree(tmpbuf);
	return r;
}
2859ef11c946SJanis Schoetterl-Glausch 
/*
 * kvm_arch_vm_ioctl() - dispatch the s390 specific vm ioctls.
 * @filp:  the VM file; filp->private_data is the struct kvm
 * @ioctl: the ioctl number
 * @arg:   ioctl argument, usually a userspace pointer to an ioctl
 *	   specific structure
 *
 * Returns an ioctl specific value (0 on success for most), a negative
 * error code on failure, and -ENOTTY for ioctl numbers not handled here.
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	/* Inject an interrupt into the VM. */
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		/* Only valid if the flic (use_irqchip) has been enabled. */
		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	/* Set/get/probe VM device attributes (uses the shared 'attr'). */
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	/* Read/write guest storage keys (e.g. for migration). */
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	/* CMMA dirty/state log; slots_lock is held across the walk. */
	case KVM_S390_GET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_get_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		if (!r) {
			r = copy_to_user(argp, &args, sizeof(args));
			if (r)
				r = -EFAULT;
		}
		break;
	}
	case KVM_S390_SET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_set_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		break;
	}
	/* Protected virtualization (secure execution) commands. */
	case KVM_S390_PV_COMMAND: {
		struct kvm_pv_cmd args;

		/* protvirt means user cpu state */
		kvm_s390_set_user_cpu_state_ctrl(kvm);
		r = 0;
		if (!is_prot_virt_host()) {
			r = -EINVAL;
			break;
		}
		if (copy_from_user(&args, argp, sizeof(args))) {
			r = -EFAULT;
			break;
		}
		/* No flags are currently defined for PV commands. */
		if (args.flags) {
			r = -EINVAL;
			break;
		}
		/* must be called without kvm->lock */
		r = kvm_s390_handle_pv(kvm, &args);
		/* Copy back even on error: args may carry rc/rrc details. */
		if (copy_to_user(argp, &args, sizeof(args))) {
			r = -EFAULT;
			break;
		}
		break;
	}
	/* VM-scope absolute memory access, see kvm_s390_vm_mem_op(). */
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_vm_mem_op(kvm, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	/* zPCI passthrough operations, only with VFIO_PCI_ZDEV_KVM. */
	case KVM_S390_ZPCI_OP: {
		struct kvm_s390_zpci_op args;

		r = -EINVAL;
		if (!IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
			break;
		if (copy_from_user(&args, argp, sizeof(args))) {
			r = -EFAULT;
			break;
		}
		r = kvm_s390_pci_zpci_op(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
3011b0c632dbSHeiko Carstens 
301245c9b47cSTony Krowiak static int kvm_s390_apxa_installed(void)
301345c9b47cSTony Krowiak {
3014e585b24aSTony Krowiak 	struct ap_config_info info;
301545c9b47cSTony Krowiak 
3016e585b24aSTony Krowiak 	if (ap_instructions_available()) {
3017e585b24aSTony Krowiak 		if (ap_qci(&info) == 0)
3018e585b24aSTony Krowiak 			return info.apxa;
301945c9b47cSTony Krowiak 	}
302045c9b47cSTony Krowiak 
302145c9b47cSTony Krowiak 	return 0;
302245c9b47cSTony Krowiak }
302345c9b47cSTony Krowiak 
3024e585b24aSTony Krowiak /*
3025e585b24aSTony Krowiak  * The format of the crypto control block (CRYCB) is specified in the 3 low
3026e585b24aSTony Krowiak  * order bits of the CRYCB designation (CRYCBD) field as follows:
3027e585b24aSTony Krowiak  * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
3028e585b24aSTony Krowiak  *	     AP extended addressing (APXA) facility are installed.
3029e585b24aSTony Krowiak  * Format 1: The APXA facility is not installed but the MSAX3 facility is.
3030e585b24aSTony Krowiak  * Format 2: Both the APXA and MSAX3 facilities are installed
3031e585b24aSTony Krowiak  */
303245c9b47cSTony Krowiak static void kvm_s390_set_crycb_format(struct kvm *kvm)
303345c9b47cSTony Krowiak {
303445c9b47cSTony Krowiak 	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
303545c9b47cSTony Krowiak 
3036e585b24aSTony Krowiak 	/* Clear the CRYCB format bits - i.e., set format 0 by default */
3037e585b24aSTony Krowiak 	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
3038e585b24aSTony Krowiak 
3039e585b24aSTony Krowiak 	/* Check whether MSAX3 is installed */
3040e585b24aSTony Krowiak 	if (!test_kvm_facility(kvm, 76))
3041e585b24aSTony Krowiak 		return;
3042e585b24aSTony Krowiak 
304345c9b47cSTony Krowiak 	if (kvm_s390_apxa_installed())
304445c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
304545c9b47cSTony Krowiak 	else
304645c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
304745c9b47cSTony Krowiak }
304845c9b47cSTony Krowiak 
/**
 * kvm_arch_crypto_set_masks
 *
 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
 *	 to be set.
 * @apm: the mask identifying the accessible AP adapters
 * @aqm: the mask identifying the accessible AP domains
 * @adm: the mask identifying the accessible AP control domains
 *
 * Set the masks that identify the adapters, domains and control domains to
 * which the KVM guest is granted access.
 *
 * Note: The kvm->lock mutex must be locked by the caller before invoking this
 *	 function.
 */
void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
			       unsigned long *aqm, unsigned long *adm)
{
	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;

	/* All vcpus are blocked while the CRYCB masks are rewritten. */
	kvm_s390_vcpu_block_all(kvm);

	/* Which APCB to fill depends on the CRYCB format in use. */
	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
	case CRYCB_FORMAT2: /* APCB1 use 256 bits */
		memcpy(crycb->apcb1.apm, apm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
			 apm[0], apm[1], apm[2], apm[3]);
		memcpy(crycb->apcb1.aqm, aqm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
			 aqm[0], aqm[1], aqm[2], aqm[3]);
		memcpy(crycb->apcb1.adm, adm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
			 adm[0], adm[1], adm[2], adm[3]);
		break;
	case CRYCB_FORMAT1:
	case CRYCB_FORMAT0: /* Fall through both use APCB0 */
		memcpy(crycb->apcb0.apm, apm, 8);
		memcpy(crycb->apcb0.aqm, aqm, 2);
		memcpy(crycb->apcb0.adm, adm, 2);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
			 apm[0], *((unsigned short *)aqm),
			 *((unsigned short *)adm));
		break;
	default:	/* Can not happen */
		break;
	}

	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
31010e237e44SPierre Morel 
/**
 * kvm_arch_crypto_clear_masks
 *
 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
 *	 to be cleared.
 *
 * Clear the masks that identify the adapters, domains and control domains to
 * which the KVM guest is granted access.
 *
 * Note: The kvm->lock mutex must be locked by the caller before invoking this
 *	 function.
 */
void kvm_arch_crypto_clear_masks(struct kvm *kvm)
{
	/* All vcpus are blocked while both APCBs are zeroed. */
	kvm_s390_vcpu_block_all(kvm);

	memset(&kvm->arch.crypto.crycb->apcb0, 0,
	       sizeof(kvm->arch.crypto.crycb->apcb0));
	memset(&kvm->arch.crypto.crycb->apcb1, 0,
	       sizeof(kvm->arch.crypto.crycb->apcb1));

	VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
312942104598STony Krowiak 
31309bb0ec09SDavid Hildenbrand static u64 kvm_s390_get_initial_cpuid(void)
31319d8d5786SMichael Mueller {
31329bb0ec09SDavid Hildenbrand 	struct cpuid cpuid;
31339bb0ec09SDavid Hildenbrand 
31349bb0ec09SDavid Hildenbrand 	get_cpu_id(&cpuid);
31359bb0ec09SDavid Hildenbrand 	cpuid.version = 0xff;
31369bb0ec09SDavid Hildenbrand 	return *((u64 *) &cpuid);
31379d8d5786SMichael Mueller }
31389d8d5786SMichael Mueller 
/*
 * Initialize the crypto state of a newly created VM: hook up the CRYCB
 * (which lives in sie_page2), choose its format and, if the MSAX3 facility
 * (76) is available, enable protected key functions with freshly generated
 * random wrapping key masks.
 */
static void kvm_s390_crypto_init(struct kvm *kvm)
{
	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);
	init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem);

	/* Without MSAX3 there are no wrapping keys to set up. */
	if (!test_kvm_facility(kvm, 76))
		return;

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}
31565102ee87STony Krowiak 
/*
 * Free the VM's system control area (SCA) and clear the pointer.
 * An extended SCA was allocated with pages_exact and is freed the same
 * way; a basic SCA is a single page.
 */
static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}
31657d43bafcSEugene (jno) Dvurechenski 
/*
 * Arch hook for freeing the kvm structure itself: drop the zPCI device
 * list (if zPCI passthrough support is built in) before delegating to the
 * generic free routine.
 */
void kvm_arch_free_vm(struct kvm *kvm)
{
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_clear_list(kvm);

	__kvm_arch_free_vm(kvm);
}
317309340b2fSMatthew Rosato 
/*
 * kvm_arch_init_vm() - arch specific part of VM creation.
 * @kvm:  the partially initialized kvm structure
 * @type: VM type; 0 for a normal VM, KVM_VM_S390_UCONTROL (if configured)
 *	  for a user controlled VM
 *
 * Allocates and initializes the SCA, the debug feature, sie_page2 with the
 * facility lists, crypto state, interrupt state and (for non-ucontrol VMs)
 * the guest address space (gmap).
 *
 * Returns 0 on success, a negative error code otherwise; on error, already
 * acquired resources are released via the out_err path.
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL_ACCOUNT;
	int i, rc;
	char debug_name[16];
	/* Shared across VMs to stagger SCA placement inside the page. */
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	/* Only the ucontrol type bit is valid, and it needs CAP_SYS_ADMIN. */
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	/* Without 64-bit SCA origins the SCA must live below 2GB. */
	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	rwlock_init(&kvm->arch.sca_lock);
	/* start with basic SCA */
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
	mutex_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	mutex_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	kvm->arch.sie_page2->kvm = kvm;
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;

	/*
	 * fac_mask: host facilities KVM may pass through (base + extended);
	 * fac_list: facilities enabled for the guest by default (base only).
	 */
	for (i = 0; i < kvm_s390_fac_size(); i++) {
		kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
					      (kvm_s390_fac_base[i] |
					       kvm_s390_fac_ext[i]);
		kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
					      kvm_s390_fac_base[i];
	}
	kvm->arch.model.subfuncs = kvm_s390_available_subfunc;

	/* we are always in czam mode - even on pre z14 machines */
	set_kvm_facility(kvm->arch.model.fac_mask, 138);
	set_kvm_facility(kvm->arch.model.fac_list, 138);
	/* we emulate STHYI in kvm */
	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);
	if (MACHINE_HAS_TLB_GUEST) {
		set_kvm_facility(kvm->arch.model.fac_mask, 147);
		set_kvm_facility(kvm->arch.model.fac_list, 147);
	}

	if (css_general_characteristics.aiv && test_facility(65))
		set_kvm_facility(kvm->arch.model.fac_mask, 65);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
		mutex_lock(&kvm->lock);
		kvm_s390_pci_init_list(kvm);
		kvm_s390_vcpu_pci_enable_interp(kvm);
		mutex_unlock(&kvm->lock);
	}

	mutex_init(&kvm->arch.float_int.ais_lock);
	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		/* ucontrol VMs manage their own gmaps per vcpu, no limit. */
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_SIZE_MAX;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.use_pfmfi = sclp.has_pfmfi;
	kvm->arch.use_skf = sclp.has_skey;
	spin_lock_init(&kvm->arch.start_stop_lock);
	kvm_s390_vsie_init(kvm);
	if (use_gisa)
		kvm_s390_gisa_init(kvm);
	INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup);
	kvm->arch.pv.set_aside = NULL;
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	/* Partial teardown; each helper tolerates a NULL/unset resource. */
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}
3307b0c632dbSHeiko Carstens 
/*
 * Tear down a single vcpu: clear its pending interrupts and async pf
 * queue, detach it from the SCA (or remove its gmap for ucontrol VMs),
 * undo CMMA and protected-virt setup, and free its SIE control block.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	u16 rc, rrc;

	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	/* Only non-ucontrol vcpus have an SCA entry to remove. */
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);
	kvm_s390_update_topology_change_report(vcpu->kvm, 1);

	/* ucontrol vcpus own a private gmap; drop it here. */
	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	/* We can not hold the vcpu mutex here, we are already dying */
	if (kvm_s390_pv_cpu_get_handle(vcpu))
		kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
	free_page((unsigned long)(vcpu->arch.sie_block));
}
3330d329c035SChristian Borntraeger 
/*
 * Arch specific VM teardown: destroy all vcpus first, then release the
 * SCA, GISA, protected-virt state, debug feature, sie_page2, gmap,
 * adapters, floating interrupts and vsie state.
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	u16 rc, rrc;

	kvm_destroy_vcpus(kvm);
	sca_dispose(kvm);
	kvm_s390_gisa_destroy(kvm);
	/*
	 * We are already at the end of life and kvm->lock is not taken.
	 * This is ok as the file descriptor is closed by now and nobody
	 * can mess with the pv state.
	 */
	kvm_s390_pv_deinit_cleanup_all(kvm, &rc, &rrc);
	/*
	 * Remove the mmu notifier only when the whole KVM VM is torn down,
	 * and only if one was registered to begin with. If the VM is
	 * currently not protected, but has been previously been protected,
	 * then it's possible that the notifier is still registered.
	 */
	if (kvm->arch.pv.mmu_notifier.ops)
		mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm);

	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)kvm->arch.sie_page2);
	/* ucontrol VMs have no VM-wide gmap (it is NULL there). */
	if (!kvm_is_ucontrol(kvm))
		gmap_remove(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	kvm_s390_vsie_destroy(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}
3362b0c632dbSHeiko Carstens 
3363b0c632dbSHeiko Carstens /* Section: vcpu related */
3364dafd032aSDominik Dingel static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
3365b0c632dbSHeiko Carstens {
33666ea427bbSMartin Schwidefsky 	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
336727e0393fSCarsten Otte 	if (!vcpu->arch.gmap)
336827e0393fSCarsten Otte 		return -ENOMEM;
33692c70fe44SChristian Borntraeger 	vcpu->arch.gmap->private = vcpu->kvm;
3370dafd032aSDominik Dingel 
337127e0393fSCarsten Otte 	return 0;
337227e0393fSCarsten Otte }
337327e0393fSCarsten Otte 
/*
 * Remove a vcpu from the shared SCA: clear its bit in the mcn bitmap
 * and zero its SIE block pointer (sda).  The read lock is sufficient
 * here because each vcpu only ever touches its own slot; the write
 * lock is reserved for the bsca->esca format switch.
 */
static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries())
		return;
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		/* the basic SCA has a single mcn word, hence the '&' */
		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}
3392a6e2f683SEugene (jno) Dvurechenski 
/*
 * Wire a vcpu into the SCA: publish the physical address of its SIE
 * block (sda), set its bit in the mcn bitmap and point the vcpu's SIE
 * block at the SCA origin (scaoh/scaol).  Without SCA entry support
 * only the basic SCA address is installed.
 */
static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries()) {
		phys_addr_t sca_phys = virt_to_phys(vcpu->kvm->arch.sca);

		/* we still need the basic sca for the ipte control */
		vcpu->arch.sie_block->scaoh = sca_phys >> 32;
		vcpu->arch.sie_block->scaol = sca_phys;
		return;
	}
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		phys_addr_t sca_phys = virt_to_phys(sca);

		sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
		vcpu->arch.sie_block->scaoh = sca_phys >> 32;
		vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK;
		/* tell the SIE that the extended SCA format is in use */
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		phys_addr_t sca_phys = virt_to_phys(sca);

		sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
		vcpu->arch.sie_block->scaoh = sca_phys >> 32;
		vcpu->arch.sie_block->scaol = sca_phys;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}
34245e044315SEugene (jno) Dvurechenski 
34255e044315SEugene (jno) Dvurechenski /* Basic SCA to Extended SCA data copy routines */
34265e044315SEugene (jno) Dvurechenski static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
34275e044315SEugene (jno) Dvurechenski {
34285e044315SEugene (jno) Dvurechenski 	d->sda = s->sda;
34295e044315SEugene (jno) Dvurechenski 	d->sigp_ctrl.c = s->sigp_ctrl.c;
34305e044315SEugene (jno) Dvurechenski 	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
34315e044315SEugene (jno) Dvurechenski }
34325e044315SEugene (jno) Dvurechenski 
34335e044315SEugene (jno) Dvurechenski static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
34345e044315SEugene (jno) Dvurechenski {
34355e044315SEugene (jno) Dvurechenski 	int i;
34365e044315SEugene (jno) Dvurechenski 
34375e044315SEugene (jno) Dvurechenski 	d->ipte_control = s->ipte_control;
34385e044315SEugene (jno) Dvurechenski 	d->mcn[0] = s->mcn;
34395e044315SEugene (jno) Dvurechenski 	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
34405e044315SEugene (jno) Dvurechenski 		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
34415e044315SEugene (jno) Dvurechenski }
34425e044315SEugene (jno) Dvurechenski 
/*
 * Convert the VM from the basic SCA format to the extended SCA format,
 * which supports more vcpu slots.  All vcpus are blocked out of SIE
 * while the new block is populated, every SIE block is re-pointed at
 * it, and only then is the old block freed.  Returns 0 on success
 * (including when already converted), -ENOMEM otherwise.
 */
static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned long vcpu_idx;
	u32 scaol, scaoh;
	phys_addr_t new_sca_phys;

	/* nothing to do if the extended format is already in use */
	if (kvm->arch.use_esca)
		return 0;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	new_sca_phys = virt_to_phys(new_sca);
	scaoh = new_sca_phys >> 32;
	scaol = new_sca_phys & ESCA_SCAOL_MASK;

	/* no vcpu may run in SIE while the SCA is being exchanged */
	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}
3485a6e2f683SEugene (jno) Dvurechenski 
3486a6e2f683SEugene (jno) Dvurechenski static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
3487a6e2f683SEugene (jno) Dvurechenski {
34885e044315SEugene (jno) Dvurechenski 	int rc;
34895e044315SEugene (jno) Dvurechenski 
3490a6940674SDavid Hildenbrand 	if (!kvm_s390_use_sca_entries()) {
3491a6940674SDavid Hildenbrand 		if (id < KVM_MAX_VCPUS)
3492a6940674SDavid Hildenbrand 			return true;
3493a6940674SDavid Hildenbrand 		return false;
3494a6940674SDavid Hildenbrand 	}
34955e044315SEugene (jno) Dvurechenski 	if (id < KVM_S390_BSCA_CPU_SLOTS)
34965e044315SEugene (jno) Dvurechenski 		return true;
349776a6dd72SDavid Hildenbrand 	if (!sclp.has_esca || !sclp.has_64bscao)
34985e044315SEugene (jno) Dvurechenski 		return false;
34995e044315SEugene (jno) Dvurechenski 
35005e044315SEugene (jno) Dvurechenski 	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
35015e044315SEugene (jno) Dvurechenski 
35025e044315SEugene (jno) Dvurechenski 	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
3503a6e2f683SEugene (jno) Dvurechenski }
3504a6e2f683SEugene (jno) Dvurechenski 
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	/* seqcount lets kvm_s390_get_cpu_timer() read a consistent pair */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}
3513db0758b2SDavid Hildenbrand 
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	/* charge the elapsed time since accounting started to the guest */
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}
3523db0758b2SDavid Hildenbrand 
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}
3531db0758b2SDavid Hildenbrand 
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}
3539db0758b2SDavid Hildenbrand 
/* preemption-safe wrapper around __enable_cpu_timer_accounting() */
static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}
3546db0758b2SDavid Hildenbrand 
/* preemption-safe wrapper around __disable_cpu_timer_accounting() */
static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}
3553db0758b2SDavid Hildenbrand 
/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	/* restart the accounting base so old elapsed time is not charged */
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}
35654287f247SDavid Hildenbrand 
/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	/* accounting disabled: the raw SIE value is already up to date */
	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}
35914287f247SDavid Hildenbrand 
/*
 * Scheduled onto a physical cpu: re-enable the vcpu's gmap, flag the
 * vcpu as running and resume cpu timer accounting unless it is idling.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{

	gmap_enable(vcpu->arch.enabled_gmap);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}
3601b0c632dbSHeiko Carstens 
/*
 * Scheduled away from the physical cpu: stop cpu timer accounting,
 * clear the running flag and remember the currently enabled gmap
 * before disabling it, so kvm_arch_vcpu_load() re-enables the same one.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
	vcpu->arch.enabled_gmap = gmap_get_enabled();
	gmap_disable(vcpu->arch.enabled_gmap);

}
3612b0c632dbSHeiko Carstens 
/*
 * Finish vcpu setup after creation: inherit the VM-wide TOD epoch,
 * attach the VM gmap and SCA entry (non-ucontrol only), and enable the
 * operation-exception intercept where required.
 */
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable(); /* protect epoch/epdx from a concurrent TOD sync */
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
	/* make vcpu_load load the right gmap on the first trigger */
	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
}
363042897d86SMarcelo Tosatti 
36318ec2fa52SChristian Borntraeger static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
36328ec2fa52SChristian Borntraeger {
36338ec2fa52SChristian Borntraeger 	if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
36348ec2fa52SChristian Borntraeger 	    test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
36358ec2fa52SChristian Borntraeger 		return true;
36368ec2fa52SChristian Borntraeger 	return false;
36378ec2fa52SChristian Borntraeger }
36388ec2fa52SChristian Borntraeger 
36398ec2fa52SChristian Borntraeger static bool kvm_has_pckmo_ecc(struct kvm *kvm)
36408ec2fa52SChristian Borntraeger {
36418ec2fa52SChristian Borntraeger 	/* At least one ECC subfunction must be present */
36428ec2fa52SChristian Borntraeger 	return kvm_has_pckmo_subfunc(kvm, 32) ||
36438ec2fa52SChristian Borntraeger 	       kvm_has_pckmo_subfunc(kvm, 33) ||
36448ec2fa52SChristian Borntraeger 	       kvm_has_pckmo_subfunc(kvm, 34) ||
36458ec2fa52SChristian Borntraeger 	       kvm_has_pckmo_subfunc(kvm, 40) ||
36468ec2fa52SChristian Borntraeger 	       kvm_has_pckmo_subfunc(kvm, 41);
36478ec2fa52SChristian Borntraeger 
36488ec2fa52SChristian Borntraeger }
36498ec2fa52SChristian Borntraeger 
/*
 * Propagate the VM-wide crypto configuration into this vcpu's SIE
 * block: AP instruction interpretation and protected-key wrapping
 * controls for AES/DEA (plus ECC, which uses the AES wrapping key).
 */
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	/*
	 * If the AP instructions are not being interpreted and the MSAX3
	 * facility is not configured for the guest, there is nothing to set up.
	 */
	if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
	/* clear all crypto control bits before re-deriving them below */
	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
	vcpu->arch.sie_block->eca &= ~ECA_APIE;
	vcpu->arch.sie_block->ecd &= ~ECD_ECC;

	if (vcpu->kvm->arch.crypto.apie)
		vcpu->arch.sie_block->eca |= ECA_APIE;

	/* Set up protected key support */
	if (vcpu->kvm->arch.crypto.aes_kw) {
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
		/* ecc is also wrapped with AES key */
		if (kvm_has_pckmo_ecc(vcpu->kvm))
			vcpu->arch.sie_block->ecd |= ECD_ECC;
	}

	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
}
36785102ee87STony Krowiak 
/* Free the page backing the SIE cbrlo (CMMA buffer) and clear the pointer. */
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	/* cbrlo holds a physical address; free_page(0) is a safe no-op */
	free_page((unsigned long)phys_to_virt(vcpu->arch.sie_block->cbrlo));
	vcpu->arch.sie_block->cbrlo = 0;
}
3684b31605c1SDominik Dingel 
3685b31605c1SDominik Dingel int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
3686b31605c1SDominik Dingel {
3687fe0ef003SNico Boehr 	void *cbrlo_page = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3688fe0ef003SNico Boehr 
3689fe0ef003SNico Boehr 	if (!cbrlo_page)
3690b31605c1SDominik Dingel 		return -ENOMEM;
3691fe0ef003SNico Boehr 
3692fe0ef003SNico Boehr 	vcpu->arch.sie_block->cbrlo = virt_to_phys(cbrlo_page);
3693b31605c1SDominik Dingel 	return 0;
3694b31605c1SDominik Dingel }
3695b31605c1SDominik Dingel 
/* Propagate the VM's CPU model (ibc and facility list) into the SIE block. */
static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.sie_block->ibc = model->ibc;
	/* the facility list is only installed when facility 7 is available */
	if (test_kvm_facility(vcpu->kvm, 7))
		vcpu->arch.sie_block->fac = virt_to_phys(model->fac_list);
}
370491520f1aSMichael Mueller 
/*
 * Finish hardware setup of a freshly created vcpu: initial cpuflags,
 * CPU model data, the execution-control bits (ecb/ecb2/eca/ecd) for
 * every facility offered to the guest, CMMA, the clock-comparator
 * wakeup timer, crypto and PCI settings, and - for protected VMs -
 * creation of the matching ultravisor cpu.  Returns 0 on success or a
 * negative error code.
 */
static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;
	u16 uvrc, uvrrc;

	/* a new vcpu starts in the stopped state */
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
	else if (test_kvm_facility(vcpu->kvm, 8))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);

	kvm_s390_vcpu_setup_model(vcpu);

	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
	if (MACHINE_HAS_ESOP)
		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
	if (test_kvm_facility(vcpu->kvm, 9))
		vcpu->arch.sie_block->ecb |= ECB_SRSI;
	if (test_kvm_facility(vcpu->kvm, 11))
		vcpu->arch.sie_block->ecb |= ECB_PTF;
	if (test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= ECB_TE;
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.sie_block->ecb |= ECB_SPECI;

	if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
	if (test_kvm_facility(vcpu->kvm, 130))
		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
	vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
	/* the remaining eca bits depend on SCLP-reported capabilities */
	if (sclp.has_cei)
		vcpu->arch.sie_block->eca |= ECA_CEI;
	if (sclp.has_ib)
		vcpu->arch.sie_block->eca |= ECA_IB;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= ECA_SII;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= ECA_SIGPI;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= ECA_VX;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
	}
	if (test_kvm_facility(vcpu->kvm, 139))
		vcpu->arch.sie_block->ecd |= ECD_MEF;
	if (test_kvm_facility(vcpu->kvm, 156))
		vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
	/* a GISA origin was assigned at create time: use adapter-interruption
	 * virtualization */
	if (vcpu->arch.sie_block->gd) {
		vcpu->arch.sie_block->eca |= ECA_AIV;
		VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
			   vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
	}
	vcpu->arch.sie_block->sdnxo = virt_to_phys(&vcpu->run->s.regs.sdnx) | SDNXC;
	vcpu->arch.sie_block->riccbd = virt_to_phys(&vcpu->run->s.regs.riccb);

	/* without keyless-subset support, intercept the storage-key insns */
	if (sclp.has_kss)
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
	else
		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	vcpu->arch.sie_block->hpid = HPID_KVM;

	kvm_s390_vcpu_crypto_setup(vcpu);

	kvm_s390_vcpu_pci_setup(vcpu);

	/* for a protected VM, create the matching ultravisor cpu as well */
	mutex_lock(&vcpu->kvm->lock);
	if (kvm_s390_pv_is_protected(vcpu->kvm)) {
		rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
		if (rc)
			kvm_s390_vcpu_unsetup_cmma(vcpu);
	}
	mutex_unlock(&vcpu->kvm->lock);

	return rc;
}
3791b0c632dbSHeiko Carstens 
3792897cc38eSSean Christopherson int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
3793897cc38eSSean Christopherson {
3794897cc38eSSean Christopherson 	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
3795897cc38eSSean Christopherson 		return -EINVAL;
3796897cc38eSSean Christopherson 	return 0;
3797897cc38eSSean Christopherson }
3798897cc38eSSean Christopherson 
/*
 * Allocate and initialize a new vcpu: its SIE control block (sharing a
 * page with the itdb), interrupt and timer state, the set of valid sync
 * regs, and - for ucontrol VMs - a private gmap.  Completes the
 * hardware setup via kvm_s390_vcpu_setup().  Returns 0 on success,
 * a negative error code otherwise.
 */
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	struct sie_page *sie_page;
	int rc;

	/* the sie block and its companions must fit exactly one page */
	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!sie_page)
		return -ENOMEM;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = virt_to_phys(&sie_page->itdb);

	/* the real guest size will always be smaller than msl */
	vcpu->arch.sie_block->mso = 0;
	vcpu->arch.sie_block->msl = sclp.hamax;

	vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm);
	seqcount_init(&vcpu->arch.cputm_seqcount);

	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	/* regs that are always synchronized via the kvm_run area */
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT |
				    KVM_SYNC_DIAG318;
	kvm_s390_set_prefix(vcpu, 0);
	/* additional sync regs depend on the offered facilities */
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	if (test_kvm_facility(vcpu->kvm, 82))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
	if (test_kvm_facility(vcpu->kvm, 133))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
	if (test_kvm_facility(vcpu->kvm, 156))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm)) {
		rc = __kvm_ucontrol_vcpu_init(vcpu);
		if (rc)
			goto out_free_sie_block;
	}

	VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
		 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);

	rc = kvm_s390_vcpu_setup(vcpu);
	if (rc)
		goto out_ucontrol_uninit;

	kvm_s390_update_topology_change_report(vcpu->kvm, 1);
	return 0;

out_ucontrol_uninit:
	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
	return rc;
}
3871b0c632dbSHeiko Carstens 
/* A vcpu is runnable when it has a deliverable interrupt pending. */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* NOTE(review): clearing the GISA kick bit here appears to
	 * acknowledge a prior wakeup kick - confirm against gisa code */
	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}
3877b0c632dbSHeiko Carstens 
3878199b5763SLongpeng(Mike) bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3879199b5763SLongpeng(Mike) {
38800546c63dSLongpeng(Mike) 	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
3881199b5763SLongpeng(Mike) }
3882199b5763SLongpeng(Mike) 
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	/*
	 * Set PROG_BLOCK_SIE first, then kick the vCPU out of SIE so it
	 * cannot (re-)enter SIE while the flag is set.
	 */
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}
388849b99e1eSChristian Borntraeger 
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	/* Clear PROG_BLOCK_SIE; the vCPU may enter SIE again afterwards. */
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
389349b99e1eSChristian Borntraeger 
static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	/*
	 * Flag a pending request via PROG_REQUEST, then kick the vCPU out of
	 * SIE so it notices the request.
	 */
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}
38998e236546SChristian Borntraeger 
39009ea59728SDavid Hildenbrand bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
39019ea59728SDavid Hildenbrand {
39029ea59728SDavid Hildenbrand 	return atomic_read(&vcpu->arch.sie_block->prog20) &
39039ea59728SDavid Hildenbrand 	       (PROG_BLOCK_SIE | PROG_REQUEST);
39049ea59728SDavid Hildenbrand }
39059ea59728SDavid Hildenbrand 
static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	/* Acknowledge the request by clearing PROG_REQUEST again. */
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}
39108e236546SChristian Borntraeger 
/*
 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	/* Request a stop intercept, and also kick a potential vSIE context. */
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
	kvm_s390_vsie_kick(vcpu);
	/* Busy-wait until the vCPU has actually left SIE. */
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}
392249b99e1eSChristian Borntraeger 
/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	/* Record the request first, then force the vCPU out of SIE to see it. */
	__kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}
392949b99e1eSChristian Borntraeger 
/*
 * gmap invalidation notifier: if the invalidated range [start, end] overlaps
 * a vCPU's prefix pages, request a prefix refresh for that vCPU.
 */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;
	unsigned long prefix;
	unsigned long i;

	/* Shadow gmaps are handled elsewhere; nothing to do here. */
	if (gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		prefix = kvm_s390_get_prefix(vcpu);
		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
				   start, end);
			kvm_s390_sync_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
		}
	}
}
39532c70fe44SChristian Borntraeger 
bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
	/* do not poll with more than halt_poll_max_steal percent of steal time */
	/*
	 * NOTE(review): the "<< 12" scales avg_steal_timer against TICK_USEC;
	 * presumably this converts the TOD-based timer value to microseconds -
	 * confirm against the lowcore definition before changing.
	 */
	if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
	    READ_ONCE(halt_poll_max_steal)) {
		vcpu->stat.halt_no_poll_steal++;
		return true;
	}
	return false;
}
39648b905d28SChristian Borntraeger 
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* Referenced by common KVM code, but never invoked on s390. */
	BUG();
	return 0;
}
3971b6d33834SChristoffer Dall 
/*
 * KVM_GET_ONE_REG: copy a single s390-specific register value out to the
 * user address in reg->addr. Returns -EINVAL for unknown register ids.
 */
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		/* The CPU timer is read via a helper, not straight from the SIE block. */
		r = put_user(kvm_s390_get_cpu_timer(vcpu),
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
402014eebd91SCarsten Otte 
/*
 * KVM_SET_ONE_REG: copy a single s390-specific register value in from the
 * user address in reg->addr. Returns -EINVAL for unknown register ids.
 */
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;
	__u64 val;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		/* Setting the CPU timer goes through a helper; read into val first. */
		r = get_user(val, (u64 __user *)reg->addr);
		if (!r)
			kvm_s390_set_cpu_timer(vcpu, val);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		/* An invalid token disables pfault handling: drop queued completions. */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
4073b6d33834SChristoffer Dall 
/* Normal CPU reset: the smallest of the three reset variants. */
static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));

	kvm_clear_async_pf_completion_queue(vcpu);
	/* Only stop the vCPU ourselves if user space does not manage the state. */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}
40857de3f142SJanosch Frank 
40867de3f142SJanosch Frank static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
40877de3f142SJanosch Frank {
40887de3f142SJanosch Frank 	/* Initial reset is a superset of the normal reset */
40897de3f142SJanosch Frank 	kvm_arch_vcpu_ioctl_normal_reset(vcpu);
40907de3f142SJanosch Frank 
4091e93fc7b4SChristian Borntraeger 	/*
4092e93fc7b4SChristian Borntraeger 	 * This equals initial cpu reset in pop, but we don't switch to ESA.
4093e93fc7b4SChristian Borntraeger 	 * We do not only reset the internal data, but also ...
4094e93fc7b4SChristian Borntraeger 	 */
40957de3f142SJanosch Frank 	vcpu->arch.sie_block->gpsw.mask = 0;
40967de3f142SJanosch Frank 	vcpu->arch.sie_block->gpsw.addr = 0;
40977de3f142SJanosch Frank 	kvm_s390_set_prefix(vcpu, 0);
40987de3f142SJanosch Frank 	kvm_s390_set_cpu_timer(vcpu, 0);
40997de3f142SJanosch Frank 	vcpu->arch.sie_block->ckc = 0;
41007de3f142SJanosch Frank 	memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
41017de3f142SJanosch Frank 	vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
41027de3f142SJanosch Frank 	vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
4103e93fc7b4SChristian Borntraeger 
4104e93fc7b4SChristian Borntraeger 	/* ... the data in sync regs */
4105e93fc7b4SChristian Borntraeger 	memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
4106e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.ckc = 0;
4107e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
4108e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
4109e93fc7b4SChristian Borntraeger 	vcpu->run->psw_addr = 0;
4110e93fc7b4SChristian Borntraeger 	vcpu->run->psw_mask = 0;
4111e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.todpr = 0;
4112e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.cputm = 0;
4113e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.ckc = 0;
4114e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.pp = 0;
4115e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.gbea = 1;
41167de3f142SJanosch Frank 	vcpu->run->s.regs.fpc = 0;
41170f303504SJanosch Frank 	/*
41180f303504SJanosch Frank 	 * Do not reset these registers in the protected case, as some of
41190f303504SJanosch Frank 	 * them are overlayed and they are not accessible in this case
41200f303504SJanosch Frank 	 * anyway.
41210f303504SJanosch Frank 	 */
41220f303504SJanosch Frank 	if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
41237de3f142SJanosch Frank 		vcpu->arch.sie_block->gbea = 1;
41247de3f142SJanosch Frank 		vcpu->arch.sie_block->pp = 0;
41257de3f142SJanosch Frank 		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
41260f303504SJanosch Frank 		vcpu->arch.sie_block->todpr = 0;
41270f303504SJanosch Frank 	}
41287de3f142SJanosch Frank }
41297de3f142SJanosch Frank 
/* Clear reset: initial reset plus zeroing of all remaining sync registers. */
static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Clear reset is a superset of the initial reset */
	kvm_arch_vcpu_ioctl_initial_reset(vcpu);

	memset(&regs->gprs, 0, sizeof(regs->gprs));
	memset(&regs->vrs, 0, sizeof(regs->vrs));
	memset(&regs->acrs, 0, sizeof(regs->acrs));
	memset(&regs->gscb, 0, sizeof(regs->gscb));

	regs->etoken = 0;
	regs->etoken_extension = 0;
}
4145b0c632dbSHeiko Carstens 
4146b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4147b0c632dbSHeiko Carstens {
4148875656feSChristoffer Dall 	vcpu_load(vcpu);
41495a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
4150875656feSChristoffer Dall 	vcpu_put(vcpu);
4151b0c632dbSHeiko Carstens 	return 0;
4152b0c632dbSHeiko Carstens }
4153b0c632dbSHeiko Carstens 
4154b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4155b0c632dbSHeiko Carstens {
41561fc9b76bSChristoffer Dall 	vcpu_load(vcpu);
41575a32c1afSChristian Borntraeger 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
41581fc9b76bSChristoffer Dall 	vcpu_put(vcpu);
4159b0c632dbSHeiko Carstens 	return 0;
4160b0c632dbSHeiko Carstens }
4161b0c632dbSHeiko Carstens 
4162b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
4163b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
4164b0c632dbSHeiko Carstens {
4165b4ef9d4eSChristoffer Dall 	vcpu_load(vcpu);
4166b4ef9d4eSChristoffer Dall 
416759674c1aSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
4168b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
4169b4ef9d4eSChristoffer Dall 
4170b4ef9d4eSChristoffer Dall 	vcpu_put(vcpu);
4171b0c632dbSHeiko Carstens 	return 0;
4172b0c632dbSHeiko Carstens }
4173b0c632dbSHeiko Carstens 
4174b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
4175b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
4176b0c632dbSHeiko Carstens {
4177bcdec41cSChristoffer Dall 	vcpu_load(vcpu);
4178bcdec41cSChristoffer Dall 
417959674c1aSChristian Borntraeger 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
4180b0c632dbSHeiko Carstens 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
4181bcdec41cSChristoffer Dall 
4182bcdec41cSChristoffer Dall 	vcpu_put(vcpu);
4183b0c632dbSHeiko Carstens 	return 0;
4184b0c632dbSHeiko Carstens }
4185b0c632dbSHeiko Carstens 
/*
 * Set the guest floating point state. Returns -EINVAL if the floating
 * point control word fails validation.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int ret = 0;

	vcpu_load(vcpu);

	/* Reject invalid floating point control values up front. */
	if (test_fp_ctl(fpu->fpc)) {
		ret = -EINVAL;
		goto out;
	}
	vcpu->run->s.regs.fpc = fpu->fpc;
	/* With the vector facility the FP registers overlay the vector regs. */
	if (MACHINE_HAS_VX)
		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
				 (freg_t *) fpu->fprs);
	else
		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));

out:
	vcpu_put(vcpu);
	return ret;
}
4207b0c632dbSHeiko Carstens 
/* Get the guest floating point state into *fpu. Always succeeds. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);

	/* make sure we have the latest values */
	save_fpu_regs();
	/* With the vector facility the FP registers overlay the vector regs. */
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *) fpu->fprs,
				 (__vector128 *) vcpu->run->s.regs.vrs);
	else
		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->run->s.regs.fpc;

	vcpu_put(vcpu);
	return 0;
}
4224b0c632dbSHeiko Carstens 
4225b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
4226b0c632dbSHeiko Carstens {
4227b0c632dbSHeiko Carstens 	int rc = 0;
4228b0c632dbSHeiko Carstens 
42297a42fdc2SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
4230b0c632dbSHeiko Carstens 		rc = -EBUSY;
4231d7b0b5ebSCarsten Otte 	else {
4232d7b0b5ebSCarsten Otte 		vcpu->run->psw_mask = psw.mask;
4233d7b0b5ebSCarsten Otte 		vcpu->run->psw_addr = psw.addr;
4234d7b0b5ebSCarsten Otte 	}
4235b0c632dbSHeiko Carstens 	return rc;
4236b0c632dbSHeiko Carstens }
4237b0c632dbSHeiko Carstens 
4238b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
4239b0c632dbSHeiko Carstens 				  struct kvm_translation *tr)
4240b0c632dbSHeiko Carstens {
4241b0c632dbSHeiko Carstens 	return -EINVAL; /* not implemented yet */
4242b0c632dbSHeiko Carstens }
4243b0c632dbSHeiko Carstens 
/* Guest-debug control flags accepted by KVM_SET_GUEST_DEBUG on s390. */
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

/*
 * KVM_SET_GUEST_DEBUG: (re)configure guest debugging. Any failure leaves
 * debugging fully disabled.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu_load(vcpu);

	/* Start from a clean slate before applying the new settings. */
	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
		rc = -EINVAL;
		goto out;
	}
	/* Guest PER emulation requires the gpere SCLP facility. */
	if (!sclp.has_gpere) {
		rc = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	/* On import failure, roll everything back to "debugging off". */
	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
	}

out:
	vcpu_put(vcpu);
	return rc;
}
4289b0c632dbSHeiko Carstens 
429062d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
429162d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
429262d9f0dbSMarcelo Tosatti {
4293fd232561SChristoffer Dall 	int ret;
4294fd232561SChristoffer Dall 
4295fd232561SChristoffer Dall 	vcpu_load(vcpu);
4296fd232561SChristoffer Dall 
42976352e4d2SDavid Hildenbrand 	/* CHECK_STOP and LOAD are not supported yet */
4298fd232561SChristoffer Dall 	ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
42996352e4d2SDavid Hildenbrand 				      KVM_MP_STATE_OPERATING;
4300fd232561SChristoffer Dall 
4301fd232561SChristoffer Dall 	vcpu_put(vcpu);
4302fd232561SChristoffer Dall 	return ret;
430362d9f0dbSMarcelo Tosatti }
430462d9f0dbSMarcelo Tosatti 
/*
 * KVM_SET_MP_STATE: change the vCPU's multiprocessing state. Calling this
 * hands control of the cpu state over to user space for this VM.
 */
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	vcpu_load(vcpu);

	/* user space knows about this interface - let it control the state */
	kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm);

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		rc = kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		rc = kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
		/* LOAD is only meaningful for protected (PV) guests. */
		if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
			rc = -ENXIO;
			break;
		}
		rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
		break;
	case KVM_MP_STATE_CHECK_STOP:
		fallthrough;	/* CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	vcpu_put(vcpu);
	return rc;
}
433862d9f0dbSMarcelo Tosatti 
/* Is the CPUSTAT_IBS flag currently set for this vCPU? */
static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
}
43438ad35755SDavid Hildenbrand 
/*
 * Process all pending vCPU requests before (re-)entering SIE. Each handled
 * request jumps back to "retry" so newly raised requests are seen too.
 * Returns 0 when all requests are handled, or a negative error code.
 */
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	if (!kvm_request_pending(vcpu))
		return 0;
	/*
	 * If the guest prefix changed, re-arm the ipte notifier for the
	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu)) {
		int rc;
		rc = gmap_mprotect_notify(vcpu->arch.gmap,
					  kvm_s390_get_prefix(vcpu),
					  PAGE_SIZE * 2, PROT_WRITE);
		if (rc) {
			/* Re-raise the request so it is retried on failure. */
			kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
			return rc;
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
		/*
		 * Disable CMM virtualization; we will emulate the ESSA
		 * instruction manually, in order to provide additional
		 * functionalities needed for live migration.
		 */
		vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
		/*
		 * Re-enable CMM virtualization if CMMA is available and
		 * CMM has been used.
		 */
		if ((vcpu->kvm->arch.use_cmma) &&
		    (vcpu->kvm->mm->context.uses_cmm))
			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		goto retry;
	}

	/* we left the vsie handler, nothing to do, just clear the request */
	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);

	return 0;
}
44212c70fe44SChristian Borntraeger 
/*
 * Set the guest TOD clock by recomputing the epoch (difference between the
 * requested guest TOD and the current host TOD) and propagating it to all
 * vCPUs. Caller must hold kvm->lock.
 */
static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_vcpu *vcpu;
	union tod_clock clk;
	unsigned long i;

	preempt_disable();

	store_tod_clock_ext(&clk);

	kvm->arch.epoch = gtod->tod - clk.tod;
	kvm->arch.epdx = 0;
	/* With the multiple-epoch facility (139) also track the epoch index. */
	if (test_kvm_facility(kvm, 139)) {
		kvm->arch.epdx = gtod->epoch_idx - clk.ei;
		/* Borrow from the epoch index if the subtraction wrapped. */
		if (kvm->arch.epoch > gtod->tod)
			kvm->arch.epdx -= 1;
	}

	/* Block all vCPUs while their epoch values are updated. */
	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		vcpu->arch.sie_block->epdx  = kvm->arch.epdx;
	}

	kvm_s390_vcpu_unblock_all(kvm);
	preempt_enable();
}
4449c0573ba5SClaudio Imbrenda 
4450c0573ba5SClaudio Imbrenda int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4451c0573ba5SClaudio Imbrenda {
4452c0573ba5SClaudio Imbrenda 	if (!mutex_trylock(&kvm->lock))
4453c0573ba5SClaudio Imbrenda 		return 0;
4454c0573ba5SClaudio Imbrenda 	__kvm_s390_set_tod_clock(kvm, gtod);
4455c0573ba5SClaudio Imbrenda 	mutex_unlock(&kvm->lock);
4456c0573ba5SClaudio Imbrenda 	return 1;
4457c0573ba5SClaudio Imbrenda }
4458c0573ba5SClaudio Imbrenda 
/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	/* Delegate to the gmap layer; request a write fault only if needed. */
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}
447424eb3a82SDominik Dingel 
44753c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
44763c038e6bSDominik Dingel 				      unsigned long token)
44773c038e6bSDominik Dingel {
44783c038e6bSDominik Dingel 	struct kvm_s390_interrupt inti;
4479383d0b05SJens Freimann 	struct kvm_s390_irq irq;
44803c038e6bSDominik Dingel 
44813c038e6bSDominik Dingel 	if (start_token) {
4482383d0b05SJens Freimann 		irq.u.ext.ext_params2 = token;
4483383d0b05SJens Freimann 		irq.type = KVM_S390_INT_PFAULT_INIT;
4484383d0b05SJens Freimann 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
44853c038e6bSDominik Dingel 	} else {
44863c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_DONE;
4487383d0b05SJens Freimann 		inti.parm64 = token;
44883c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
44893c038e6bSDominik Dingel 	}
44903c038e6bSDominik Dingel }
44913c038e6bSDominik Dingel 
/*
 * Async-pf callback: the faulting page is not yet present on the host.
 * Inject a PFAULT_INIT interrupt carrying the token so the guest can
 * schedule other work while the page is faulted in.
 */
bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);

	/* true: the "not present" notification has been delivered */
	return true;
}
45003c038e6bSDominik Dingel 
/*
 * Async-pf callback: the page has been faulted in.  Inject a PFAULT_DONE
 * floating interrupt with the matching token so the guest can resume the
 * work that was waiting on this page.
 */
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}
45073c038e6bSDominik Dingel 
/*
 * Async-pf callback invoked when a page becomes ready; intentionally empty
 * because the completion is injected via kvm_arch_async_page_present().
 */
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}
45133c038e6bSDominik Dingel 
/*
 * Tell common async-pf code that completed items may always be dequeued;
 * delivery itself happens via the interrupt injected on completion.
 */
bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
45223c038e6bSDominik Dingel 
/*
 * Try to arm an async pfault for the guest address that just faulted
 * (current->thread.gmap_addr).
 *
 * Return: true if an async pfault was set up, false if the preconditions
 * are not met and the fault must be resolved synchronously by the caller.
 */
static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;

	/* guest has not registered a pfault token */
	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return false;
	/* PSW mask does not match the guest's pfault select/compare setup */
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return false;
	/* external interrupts (which deliver PFAULT_INIT) must be enabled */
	if (psw_extint_disabled(vcpu))
		return false;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return false;
	/* the service-signal subclass must be enabled in CR0 */
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
		return false;
	if (!vcpu->arch.gmap->pfault_enabled)
		return false;

	/* host virtual address of the faulting guest page + offset */
	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	/* read the 8-byte token the guest stored at its registered address */
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return false;

	return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
}
45493c038e6bSDominik Dingel 
/*
 * Prepare the vcpu for the next SIE entry: handle async-pf housekeeping,
 * deliver pending interrupts, process pending requests and set up guest
 * debugging state.
 *
 * Return: 0 on success, negative error code or positive exit reason if
 * the entry must be aborted.
 */
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	/* gprs 14/15 are staged in the SIE block for the guest run */
	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];

	if (need_resched())
		schedule();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	/* clear this vcpu's bit in the GISA kicked_mask before entering SIE */
	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
45913fb4c40fSThomas Huth 
4592492d8642SThomas Huth static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
4593492d8642SThomas Huth {
459456317920SDavid Hildenbrand 	struct kvm_s390_pgm_info pgm_info = {
459556317920SDavid Hildenbrand 		.code = PGM_ADDRESSING,
459656317920SDavid Hildenbrand 	};
459756317920SDavid Hildenbrand 	u8 opcode, ilen;
4598492d8642SThomas Huth 	int rc;
4599492d8642SThomas Huth 
4600492d8642SThomas Huth 	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
4601492d8642SThomas Huth 	trace_kvm_s390_sie_fault(vcpu);
4602492d8642SThomas Huth 
4603492d8642SThomas Huth 	/*
4604492d8642SThomas Huth 	 * We want to inject an addressing exception, which is defined as a
4605492d8642SThomas Huth 	 * suppressing or terminating exception. However, since we came here
4606492d8642SThomas Huth 	 * by a DAT access exception, the PSW still points to the faulting
4607492d8642SThomas Huth 	 * instruction since DAT exceptions are nullifying. So we've got
4608492d8642SThomas Huth 	 * to look up the current opcode to get the length of the instruction
4609492d8642SThomas Huth 	 * to be able to forward the PSW.
4610492d8642SThomas Huth 	 */
46113fa8cad7SDavid Hildenbrand 	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
461256317920SDavid Hildenbrand 	ilen = insn_length(opcode);
46139b0d721aSDavid Hildenbrand 	if (rc < 0) {
46149b0d721aSDavid Hildenbrand 		return rc;
46159b0d721aSDavid Hildenbrand 	} else if (rc) {
46169b0d721aSDavid Hildenbrand 		/* Instruction-Fetching Exceptions - we can't detect the ilen.
46179b0d721aSDavid Hildenbrand 		 * Forward by arbitrary ilc, injection will take care of
46189b0d721aSDavid Hildenbrand 		 * nullification if necessary.
46199b0d721aSDavid Hildenbrand 		 */
46209b0d721aSDavid Hildenbrand 		pgm_info = vcpu->arch.pgm;
46219b0d721aSDavid Hildenbrand 		ilen = 4;
46229b0d721aSDavid Hildenbrand 	}
462356317920SDavid Hildenbrand 	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
462456317920SDavid Hildenbrand 	kvm_s390_forward_psw(vcpu, ilen);
462556317920SDavid Hildenbrand 	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
4626492d8642SThomas Huth }
4627492d8642SThomas Huth 
/*
 * Handle the outcome of one SIE run.
 *
 * Return: 0 if the guest can simply be resumed, a negative error code,
 * or -EREMOTE when kvm_run has been prepared for userspace handling.
 */
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	struct mcck_volatile_info *mcck_info;
	struct sie_page *sie_page;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	/* restore the gprs 14/15 staged in the SIE block (see vcpu_pre_run) */
	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

	/* -EINTR from sie64a signals a machine check to be reinjected */
	if (exit_reason == -EINTR) {
		VCPU_EVENT(vcpu, 3, "%s", "machine check");
		sie_page = container_of(vcpu->arch.sie_block,
					struct sie_page, sie_block);
		mcck_info = &sie_page->mcck_info;
		kvm_s390_reinject_machine_check(vcpu, mcck_info);
		return 0;
	}

	if (vcpu->arch.sie_block->icptcode > 0) {
		int rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		/* intercept not handled in-kernel: hand details to userspace */
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	} else if (exit_reason != -EFAULT) {
		vcpu->stat.exit_null++;
		return 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		/* ucontrol VMs resolve faults in userspace */
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		return -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		/* guest page fault: try async pfault, else fault in now */
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		vcpu->stat.pfault_sync++;
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
	}
	return vcpu_post_run_fault_in_sie(vcpu);
}
46813fb4c40fSThomas Huth 
#define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
/*
 * Run the vcpu in a SIE entry/exit loop until an error occurs, a signal
 * is pending or guest debugging requires an exit to userspace.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;
	struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	kvm_vcpu_srcu_read_lock(vcpu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		/* drop srcu for the (potentially long) guest run */
		kvm_vcpu_srcu_read_unlock(vcpu);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		local_irq_disable();
		guest_enter_irqoff();
		__disable_cpu_timer_accounting(vcpu);
		local_irq_enable();
		/*
		 * Protected guests exchange gprs via the pv_grregs area of
		 * the SIE page: stage them in before entry, copy back after.
		 */
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			memcpy(sie_page->pv_grregs,
			       vcpu->run->s.regs.gprs,
			       sizeof(sie_page->pv_grregs));
		}
		if (test_cpu_flag(CIF_FPU))
			load_fpu_regs();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			memcpy(vcpu->run->s.regs.gprs,
			       sie_page->pv_grregs,
			       sizeof(sie_page->pv_grregs));
			/*
			 * We're not allowed to inject interrupts on intercepts
			 * that leave the guest state in an "in-between" state
			 * where the next SIE entry will do a continuation.
			 * Fence interrupts in our "internal" PSW.
			 */
			if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
			    vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
				vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
			}
		}
		local_irq_disable();
		__enable_cpu_timer_accounting(vcpu);
		guest_exit_irqoff();
		local_irq_enable();
		/* re-acquire srcu before touching memslot-protected state */
		kvm_vcpu_srcu_read_lock(vcpu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	kvm_vcpu_srcu_read_unlock(vcpu);
	return rc;
}
4744b0c632dbSHeiko Carstens 
/*
 * Sync the format-2 (non-protected guest) parts of the register state
 * from kvm_run into the vcpu and its SIE block.  Called from sync_regs()
 * only when the vcpu is not protected.
 */
static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;
	struct runtime_instr_cb *riccb;
	struct gs_cb *gscb;

	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		/* an invalid token disables async pfaults; drop completions */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
		vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
		vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
		VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc);
	}
	/*
	 * If userspace sets the riccb (e.g. after migration) to a valid state,
	 * we should enable RI here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
	    test_kvm_facility(vcpu->kvm, 64) &&
	    riccb->v &&
	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
	}
	/*
	 * If userspace sets the gscb (e.g. after migration) to non-zero,
	 * we should enable GS here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
	    test_kvm_facility(vcpu->kvm, 133) &&
	    gscb->gssm &&
	    !vcpu->arch.gs_enabled) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
	}
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
	    test_kvm_facility(vcpu->kvm, 82)) {
		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
	}
	/* swap in the guest's guarded-storage control block, saving the host's */
	if (MACHINE_HAS_GS) {
		preempt_disable();
		__ctl_set_bit(2, 4);
		if (current->thread.gs_cb) {
			vcpu->arch.host_gscb = current->thread.gs_cb;
			save_gs_cb(vcpu->arch.host_gscb);
		}
		if (vcpu->arch.gs_enabled) {
			current->thread.gs_cb = (struct gs_cb *)
						&vcpu->run->s.regs.gscb;
			restore_gs_cb(current->thread.gs_cb);
		}
		preempt_enable();
	}
	/* SIE will load etoken directly from SDNX and therefore kvm_run */
}
4817811ea797SJanosch Frank 
/*
 * Load dirty register state from kvm_run into the vcpu before entering
 * the run loop.  For protected guests only a restricted subset is synced;
 * everything else is handled by sync_regs_fmt2().
 */
static void sync_regs(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
	}
	/* swap access registers: host out, guest in */
	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	/* save host (userspace) fprs/vrs */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;

	/* Sync fmt2 only data */
	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
		sync_regs_fmt2(vcpu);
	} else {
		/*
		 * In several places we have to modify our internal view to
		 * not do things that are disallowed by the ultravisor. For
		 * example we must not inject interrupts after specific exits
		 * (e.g. 112 prefix page not secure). We do this by turning
		 * off the machine check, external and I/O interrupt bits
		 * of our PSW copy. To avoid getting validity intercepts, we
		 * do only accept the condition code from userspace.
		 */
		vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
		vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
						   PSW_MASK_CC;
	}

	/* everything userspace marked dirty has now been consumed */
	kvm_run->kvm_dirty_regs = 0;
}
4868b028ee3eSDavid Hildenbrand 
/*
 * Store the format-2 (non-protected guest) parts of the register state
 * back into kvm_run after the run loop.  Counterpart of sync_regs_fmt2().
 */
static void store_regs_fmt2(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
	kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
	/* swap the guarded-storage control block back: guest out, host in */
	if (MACHINE_HAS_GS) {
		preempt_disable();
		__ctl_set_bit(2, 4);
		if (vcpu->arch.gs_enabled)
			save_gs_cb(current->thread.gs_cb);
		current->thread.gs_cb = vcpu->arch.host_gscb;
		restore_gs_cb(vcpu->arch.host_gscb);
		if (!vcpu->arch.host_gscb)
			__ctl_clear_bit(2, 4);
		vcpu->arch.host_gscb = NULL;
		preempt_enable();
	}
	/* SIE will save etoken directly into SDNX and therefore kvm_run */
}
4892b028ee3eSDavid Hildenbrand 
/*
 * Store register state from the vcpu back into kvm_run after the run
 * loop and restore the host (userspace) register context.  Counterpart
 * of sync_regs().
 */
static void store_regs(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
	/* swap access registers back: guest out, host in */
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	/* Restore will be done lazily at return */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
		store_regs_fmt2(vcpu);
}
4917811ea797SJanosch Frank 
/*
 * KVM_RUN ioctl entry point: validate the request, sync register state
 * from userspace, run the guest via __vcpu_run() and store the state
 * back for userspace.
 *
 * Return: 0 (kvm_run prepared), -EINTR on signal/immediate exit, or a
 * negative error code.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;
	int rc;

	/*
	 * Running a VM while dumping always has the potential to
	 * produce inconsistent dump data. But for PV vcpus a SIE
	 * entry while dumping could also lead to a fatal validity
	 * intercept which we absolutely want to avoid.
	 */
	if (vcpu->kvm->arch.pv.dumping)
		return -EINVAL;

	if (kvm_run->immediate_exit)
		return -EINTR;

	/* reject sync/dirty bits outside the fields s390 supports */
	if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
	    kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
		return -EINVAL;

	vcpu_load(vcpu);

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
		goto out;
	}

	kvm_sigset_activate(vcpu);

	/*
	 * no need to check the return value of vcpu_start as it can only have
	 * an error for protvirt, but protvirt means user cpu state
	 */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		rc = -EINVAL;
		goto out;
	}

	sync_regs(vcpu);
	enable_cpu_timer_accounting(vcpu);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc)  {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* userspace support is needed, kvm_run has been prepared */
		rc = 0;
	}

	disable_cpu_timer_accounting(vcpu);
	store_regs(vcpu);

	kvm_sigset_deactivate(vcpu);

	vcpu->stat.exit_userspace++;
out:
	vcpu_put(vcpu);
	return rc;
}
4993b0c632dbSHeiko Carstens 
4994b0c632dbSHeiko Carstens /*
4995b0c632dbSHeiko Carstens  * store status at address
4996b0c632dbSHeiko Carstens  * we use have two special cases:
4997b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
4998b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
4999b0c632dbSHeiko Carstens  */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;	/* arch mode id: z/Architecture */
	freg_t fprs[NUM_FPRS];
	unsigned int px;
	u64 clkcomp, cputm;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		/* flag the arch mode at absolute address 163, store to gpa 0 */
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		/* same flag byte, but via a real (prefixed) access; store to
		 * the vcpu's prefix area */
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else
		/* caller passed the FP save area address itself; rebase so
		 * the __LC_* offsets below can be applied uniformly */
		gpa -= __LC_FPREGS_SAVE_AREA;

	/* manually convert vector registers if necessary */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	/* errors are OR-ed together; a single -EFAULT at the end suffices */
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	cputm = kvm_s390_get_cpu_timer(vcpu);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &cputm, 8);
	/* the clock comparator is stored right-shifted by 8 bits */
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
5051b0c632dbSHeiko Carstens 
5052e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
5053e879892cSThomas Huth {
5054e879892cSThomas Huth 	/*
5055e879892cSThomas Huth 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
505631d8b8d4SChristian Borntraeger 	 * switch in the run ioctl. Let's update our copies before we save
5057e879892cSThomas Huth 	 * it into the save area
5058e879892cSThomas Huth 	 */
5059d0164ee2SHendrik Brueckner 	save_fpu_regs();
50609abc2a08SDavid Hildenbrand 	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
5061e879892cSThomas Huth 	save_access_regs(vcpu->run->s.regs.acrs);
5062e879892cSThomas Huth 
5063e879892cSThomas Huth 	return kvm_s390_store_status_unloaded(vcpu, addr);
5064e879892cSThomas Huth }
5065e879892cSThomas Huth 
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	/* consume a still-pending ENABLE request, then ask for DISABLE */
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}
50718ad35755SDavid Hildenbrand 
50728ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
50738ad35755SDavid Hildenbrand {
507446808a4cSMarc Zyngier 	unsigned long i;
50758ad35755SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
50768ad35755SDavid Hildenbrand 
50778ad35755SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
50788ad35755SDavid Hildenbrand 		__disable_ibs_on_vcpu(vcpu);
50798ad35755SDavid Hildenbrand 	}
50808ad35755SDavid Hildenbrand }
50818ad35755SDavid Hildenbrand 
static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	/* nothing to do if the machine does not offer IBS */
	if (!sclp.has_ibs)
		return;
	/* consume a still-pending DISABLE request, then ask for ENABLE */
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}
50898ad35755SDavid Hildenbrand 
/*
 * Move the vcpu out of the STOPPED state.
 *
 * Returns 0 on success (including the already-running case) or a negative
 * error code if the ultravisor refuses the state change for a protected vcpu.
 */
int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, r = 0, started_vcpus = 0;

	/* starting an already running vcpu is a nop */
	if (!is_vcpu_stopped(vcpu))
		return 0;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Let's tell the UV that we want to change into the operating state */
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
		if (r) {
			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
			return r;
		}
	}

	/* count how many vcpus are running before we start this one */
	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i)))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
	/*
	 * The real PSW might have changed due to a RESTART interpreted by the
	 * ultravisor. We block all interrupts and let the next sie exit
	 * refresh our view.
	 */
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return 0;
}
51446852d7b6SDavid Hildenbrand 
/*
 * Move the vcpu into the STOPPED state.
 *
 * Returns 0 on success (including the already-stopped case) or a negative
 * error code if the ultravisor refuses the state change for a protected vcpu.
 */
int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, r = 0, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	/* stopping an already stopped vcpu is a nop */
	if (is_vcpu_stopped(vcpu))
		return 0;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Let's tell the UV that we want to change into the stopped state */
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
		if (r) {
			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
			return r;
		}
	}

	/*
	 * Set the VCPU to STOPPED and THEN clear the interrupt flag,
	 * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders
	 * have been fully processed. This will ensure that the VCPU
	 * is kept BUSY if another VCPU is inquiring with SIGP SENSE.
	 */
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
	kvm_s390_clear_stop_irq(vcpu);

	__disable_ibs_on_vcpu(vcpu);

	/* find out whether exactly one other vcpu remains running */
	for (i = 0; i < online_vcpus; i++) {
		struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i);

		if (!is_vcpu_stopped(tmp)) {
			started_vcpus++;
			started_vcpu = tmp;
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return 0;
}
51986852d7b6SDavid Hildenbrand 
5199d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
5200d6712df9SCornelia Huck 				     struct kvm_enable_cap *cap)
5201d6712df9SCornelia Huck {
5202d6712df9SCornelia Huck 	int r;
5203d6712df9SCornelia Huck 
5204d6712df9SCornelia Huck 	if (cap->flags)
5205d6712df9SCornelia Huck 		return -EINVAL;
5206d6712df9SCornelia Huck 
5207d6712df9SCornelia Huck 	switch (cap->cap) {
5208fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
5209fa6b7fe9SCornelia Huck 		if (!vcpu->kvm->arch.css_support) {
5210fa6b7fe9SCornelia Huck 			vcpu->kvm->arch.css_support = 1;
5211c92ea7b9SChristian Borntraeger 			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
5212fa6b7fe9SCornelia Huck 			trace_kvm_s390_enable_css(vcpu->kvm);
5213fa6b7fe9SCornelia Huck 		}
5214fa6b7fe9SCornelia Huck 		r = 0;
5215fa6b7fe9SCornelia Huck 		break;
5216d6712df9SCornelia Huck 	default:
5217d6712df9SCornelia Huck 		r = -EINVAL;
5218d6712df9SCornelia Huck 		break;
5219d6712df9SCornelia Huck 	}
5220d6712df9SCornelia Huck 	return r;
5221d6712df9SCornelia Huck }
5222d6712df9SCornelia Huck 
/*
 * Copy data between user space and the secure instruction data area (sida)
 * of a protected vcpu. The check order (and thus which errno wins when
 * several checks would fail) is userspace ABI - do not reorder.
 */
static long kvm_s390_vcpu_sida_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *sida_addr;
	int r = 0;

	/* no flags are defined for sida accesses; zero size is invalid */
	if (mop->flags || !mop->size)
		return -EINVAL;
	/* reject size + offset wrap-around */
	if (mop->size + mop->sida_offset < mop->size)
		return -EINVAL;
	/* the access must fit inside the sida */
	if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
		return -E2BIG;
	/* only protected (PV) vcpus have a sida */
	if (!kvm_s390_pv_cpu_is_protected(vcpu))
		return -EINVAL;

	/*
	 * sida_addr() on the right-hand side is the helper of the same name
	 * (a macro - a real function would be shadowed by the local above).
	 */
	sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset;

	switch (mop->op) {
	case KVM_S390_MEMOP_SIDA_READ:
		if (copy_to_user(uaddr, sida_addr, mop->size))
			r = -EFAULT;

		break;
	case KVM_S390_MEMOP_SIDA_WRITE:
		if (copy_from_user(sida_addr, uaddr, mop->size))
			r = -EFAULT;
		break;
	}
	return r;
}
52540e1234c0SJanis Schoetterl-Glausch 
/*
 * Handle KVM_S390_MEM_OP logical read/write for a non-protected vcpu.
 *
 * Supported flags: CHECK_ONLY (only verify accessibility, move no data),
 * SKEY_PROTECTION (honor the given storage access key) and
 * INJECT_EXCEPTION (forward an access exception to the guest on failure).
 * Returns 0 on success, a negative errno on local failure, or a positive
 * program interruption code reported by the guest access functions.
 */
static long kvm_s390_vcpu_mem_op(struct kvm_vcpu *vcpu,
				 struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r = 0;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY
				    | KVM_S390_MEMOP_F_SKEY_PROTECTION;

	if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
		return -EINVAL;
	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;
	/* protected guest memory is not accessible this way; use the sida op */
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		return -EINVAL;
	if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) {
		if (access_key_invalid(mop->key))
			return -EINVAL;
	} else {
		/* normalize: key is ignored without SKEY_PROTECTION */
		mop->key = 0;
	}
	/* a bounce buffer is only needed when data is actually moved */
	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size,
					    GACC_FETCH, mop->key);
			break;
		}
		r = read_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
					mop->size, mop->key);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size,
					    GACC_STORE, mop->key);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
					 mop->size, mop->key);
		break;
	}

	/* r > 0 means the access functions flagged a program exception */
	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}
531841408c28SThomas Huth 
53190e1234c0SJanis Schoetterl-Glausch static long kvm_s390_vcpu_memsida_op(struct kvm_vcpu *vcpu,
532019e12277SJanosch Frank 				     struct kvm_s390_mem_op *mop)
532119e12277SJanosch Frank {
532219e12277SJanosch Frank 	int r, srcu_idx;
532319e12277SJanosch Frank 
532419e12277SJanosch Frank 	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
532519e12277SJanosch Frank 
532619e12277SJanosch Frank 	switch (mop->op) {
532719e12277SJanosch Frank 	case KVM_S390_MEMOP_LOGICAL_READ:
532819e12277SJanosch Frank 	case KVM_S390_MEMOP_LOGICAL_WRITE:
53290e1234c0SJanis Schoetterl-Glausch 		r = kvm_s390_vcpu_mem_op(vcpu, mop);
533019e12277SJanosch Frank 		break;
533119e12277SJanosch Frank 	case KVM_S390_MEMOP_SIDA_READ:
533219e12277SJanosch Frank 	case KVM_S390_MEMOP_SIDA_WRITE:
533319e12277SJanosch Frank 		/* we are locked against sida going away by the vcpu->mutex */
53340e1234c0SJanis Schoetterl-Glausch 		r = kvm_s390_vcpu_sida_op(vcpu, mop);
533519e12277SJanosch Frank 		break;
533619e12277SJanosch Frank 	default:
533719e12277SJanosch Frank 		r = -EINVAL;
533819e12277SJanosch Frank 	}
533919e12277SJanosch Frank 
534019e12277SJanosch Frank 	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
534119e12277SJanosch Frank 	return r;
534219e12277SJanosch Frank }
534319e12277SJanosch Frank 
/*
 * Fast-path vcpu ioctls: only interrupt injection is handled here.
 * Everything else returns -ENOIOCTLCMD so common code falls through to
 * kvm_arch_vcpu_ioctl().
 */
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		/* zero-init: the converter may not fill all fields */
		struct kvm_s390_irq s390irq = {};

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		/* translate the legacy interrupt format to a kvm_s390_irq */
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	}
	return -ENOIOCTLCMD;
}
53715cb0944cSPaolo Bonzini 
/*
 * Handle the KVM_PV_DUMP vcpu command: have the ultravisor dump this
 * vcpu's state into a kernel buffer and copy it to user space.
 *
 * cmd->rc / cmd->rrc are filled with the UV return codes; the caller is
 * responsible for copying them back to user space.
 */
static int kvm_s390_handle_pv_vcpu_dump(struct kvm_vcpu *vcpu,
					struct kvm_pv_cmd *cmd)
{
	struct kvm_s390_pv_dmp dmp;
	void *data;
	int ret;

	/* Dump initialization is a prerequisite */
	if (!vcpu->kvm->arch.pv.dumping)
		return -EINVAL;

	if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp)))
		return -EFAULT;

	/* We only handle this subcmd right now */
	if (dmp.subcmd != KVM_PV_DUMP_CPU)
		return -EINVAL;

	/* CPU dump length is the same as create cpu storage donation. */
	if (dmp.buff_len != uv_info.guest_cpu_stor_len)
		return -EINVAL;

	data = kvzalloc(uv_info.guest_cpu_stor_len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc);

	VCPU_EVENT(vcpu, 3, "PROTVIRT DUMP CPU %d rc %x rrc %x",
		   vcpu->vcpu_id, cmd->rc, cmd->rrc);

	/* map any UV failure to -EINVAL; details remain in rc/rrc */
	if (ret)
		ret = -EINVAL;

	/* On success copy over the dump data */
	if (!ret && copy_to_user((__u8 __user *)dmp.buff_addr, data, uv_info.guest_cpu_stor_len))
		ret = -EFAULT;

	kvfree(data);
	return ret;
}
54138aba0958SJanosch Frank 
/*
 * Main (synchronous) vcpu ioctl handler; the vcpu is loaded for the
 * duration of the call. Interrupt injection is served on the fast path
 * in kvm_arch_vcpu_async_ioctl() and never reaches this function.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;
	u16 rc, rrc;	/* ultravisor return/reason codes */

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_S390_STORE_STATUS:
		/* guest memory access requires the srcu read lock */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_store_status_unloaded(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	/*
	 * For the three reset flavors: reset the KVM-side state first,
	 * then mirror the reset into the ultravisor for protected vcpus.
	 */
	case KVM_S390_CLEAR_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_clear_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_INITIAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_INITIAL,
					  &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_NORMAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_normal_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		/* register state of protected vcpus is not accessible */
		r = -EINVAL;
		if (kvm_s390_pv_cpu_is_protected(vcpu))
			break;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		/* ucas mappings only exist for user-controlled VMs */
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
			ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		/* resolve a guest mapping fault at address @arg */
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_vcpu_memsida_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		/* the buffer must hold a whole number of kvm_s390_irq */
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *)  irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_PV_CPU_COMMAND: {
		struct kvm_pv_cmd cmd;

		r = -EINVAL;
		if (!is_prot_virt_host())
			break;

		r = -EFAULT;
		if (copy_from_user(&cmd, argp, sizeof(cmd)))
			break;

		r = -EINVAL;
		if (cmd.flags)
			break;

		/* We only handle this cmd right now */
		if (cmd.cmd != KVM_PV_DUMP)
			break;

		r = kvm_s390_handle_pv_vcpu_dump(vcpu, &cmd);

		/* Always copy over UV rc / rrc data */
		if (copy_to_user((__u8 __user *)argp, &cmd.rc,
				 sizeof(cmd.rc) + sizeof(cmd.rrc)))
			r = -EFAULT;
		break;
	}
	default:
		r = -ENOTTY;
	}

	vcpu_put(vcpu);
	return r;
}
5612b0c632dbSHeiko Carstens 
56131499fa80SSouptick Joarder vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
56145b1c1493SCarsten Otte {
56155b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
56165b1c1493SCarsten Otte 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
56175b1c1493SCarsten Otte 		 && (kvm_is_ucontrol(vcpu->kvm))) {
56185b1c1493SCarsten Otte 		vmf->page = virt_to_page(vcpu->arch.sie_block);
56195b1c1493SCarsten Otte 		get_page(vmf->page);
56205b1c1493SCarsten Otte 		return 0;
56215b1c1493SCarsten Otte 	}
56225b1c1493SCarsten Otte #endif
56235b1c1493SCarsten Otte 	return VM_FAULT_SIGBUS;
56245b1c1493SCarsten Otte }
56255b1c1493SCarsten Otte 
/* On s390 the interrupt controller is always emulated in the kernel. */
bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
{
	return true;
}
5630d663b8a2SPaolo Bonzini 
5631b0c632dbSHeiko Carstens /* Section: memory related */
5632f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
5633537a17b3SSean Christopherson 				   const struct kvm_memory_slot *old,
5634537a17b3SSean Christopherson 				   struct kvm_memory_slot *new,
56357b6195a9STakuya Yoshikawa 				   enum kvm_mr_change change)
5636b0c632dbSHeiko Carstens {
5637ec5c8697SSean Christopherson 	gpa_t size;
5638ec5c8697SSean Christopherson 
5639ec5c8697SSean Christopherson 	/* When we are protected, we should not change the memory slots */
5640ec5c8697SSean Christopherson 	if (kvm_s390_pv_get_handle(kvm))
5641ec5c8697SSean Christopherson 		return -EINVAL;
5642ec5c8697SSean Christopherson 
5643ec5c8697SSean Christopherson 	if (change == KVM_MR_DELETE || change == KVM_MR_FLAGS_ONLY)
5644ec5c8697SSean Christopherson 		return 0;
5645cf5b4869SSean Christopherson 
5646dd2887e7SNick Wang 	/* A few sanity checks. We can have memory slots which have to be
5647dd2887e7SNick Wang 	   located/ended at a segment boundary (1MB). The memory in userland is
5648dd2887e7SNick Wang 	   ok to be fragmented into various different vmas. It is okay to mmap()
5649dd2887e7SNick Wang 	   and munmap() stuff in this slot after doing this call at any time */
5650b0c632dbSHeiko Carstens 
5651cf5b4869SSean Christopherson 	if (new->userspace_addr & 0xffffful)
5652b0c632dbSHeiko Carstens 		return -EINVAL;
5653b0c632dbSHeiko Carstens 
5654ec5c8697SSean Christopherson 	size = new->npages * PAGE_SIZE;
5655cf5b4869SSean Christopherson 	if (size & 0xffffful)
5656b0c632dbSHeiko Carstens 		return -EINVAL;
5657b0c632dbSHeiko Carstens 
5658cf5b4869SSean Christopherson 	if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
5659a3a92c31SDominik Dingel 		return -EINVAL;
5660a3a92c31SDominik Dingel 
5661f7784b8eSMarcelo Tosatti 	return 0;
5662f7784b8eSMarcelo Tosatti }
5663f7784b8eSMarcelo Tosatti 
5664f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
56659d4c197cSSean Christopherson 				struct kvm_memory_slot *old,
5666f36f3f28SPaolo Bonzini 				const struct kvm_memory_slot *new,
56678482644aSTakuya Yoshikawa 				enum kvm_mr_change change)
5668f7784b8eSMarcelo Tosatti {
566919ec166cSChristian Borntraeger 	int rc = 0;
5670f7784b8eSMarcelo Tosatti 
567119ec166cSChristian Borntraeger 	switch (change) {
567219ec166cSChristian Borntraeger 	case KVM_MR_DELETE:
567319ec166cSChristian Borntraeger 		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
567419ec166cSChristian Borntraeger 					old->npages * PAGE_SIZE);
567519ec166cSChristian Borntraeger 		break;
567619ec166cSChristian Borntraeger 	case KVM_MR_MOVE:
567719ec166cSChristian Borntraeger 		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
567819ec166cSChristian Borntraeger 					old->npages * PAGE_SIZE);
567919ec166cSChristian Borntraeger 		if (rc)
568019ec166cSChristian Borntraeger 			break;
56813b684a42SJoe Perches 		fallthrough;
568219ec166cSChristian Borntraeger 	case KVM_MR_CREATE:
5683cf5b4869SSean Christopherson 		rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr,
5684cf5b4869SSean Christopherson 				      new->base_gfn * PAGE_SIZE,
5685cf5b4869SSean Christopherson 				      new->npages * PAGE_SIZE);
568619ec166cSChristian Borntraeger 		break;
568719ec166cSChristian Borntraeger 	case KVM_MR_FLAGS_ONLY:
568819ec166cSChristian Borntraeger 		break;
568919ec166cSChristian Borntraeger 	default:
569019ec166cSChristian Borntraeger 		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
569119ec166cSChristian Borntraeger 	}
5692598841caSCarsten Otte 	if (rc)
5693ea2cdd27SDavid Hildenbrand 		pr_warn("failed to commit memory region\n");
5694598841caSCarsten Otte 	return;
5695b0c632dbSHeiko Carstens }
5696b0c632dbSHeiko Carstens 
569760a37709SAlexander Yarygin static inline unsigned long nonhyp_mask(int i)
569860a37709SAlexander Yarygin {
569960a37709SAlexander Yarygin 	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
570060a37709SAlexander Yarygin 
570160a37709SAlexander Yarygin 	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
570260a37709SAlexander Yarygin }
570360a37709SAlexander Yarygin 
5704b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
5705b0c632dbSHeiko Carstens {
570660a37709SAlexander Yarygin 	int i;
570760a37709SAlexander Yarygin 
570807197fd0SDavid Hildenbrand 	if (!sclp.has_sief2) {
57098d43d570SMichael Mueller 		pr_info("SIE is not available\n");
571007197fd0SDavid Hildenbrand 		return -ENODEV;
571107197fd0SDavid Hildenbrand 	}
571207197fd0SDavid Hildenbrand 
5713a4499382SJanosch Frank 	if (nested && hpage) {
57148d43d570SMichael Mueller 		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
5715a4499382SJanosch Frank 		return -EINVAL;
5716a4499382SJanosch Frank 	}
5717a4499382SJanosch Frank 
571860a37709SAlexander Yarygin 	for (i = 0; i < 16; i++)
5719c3b9e3e1SChristian Borntraeger 		kvm_s390_fac_base[i] |=
572017e89e13SSven Schnelle 			stfle_fac_list[i] & nonhyp_mask(i);
572160a37709SAlexander Yarygin 
57229d8d5786SMichael Mueller 	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
5723b0c632dbSHeiko Carstens }
5724b0c632dbSHeiko Carstens 
/* Module exit: unregister from the common KVM framework. */
static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}
5729b0c632dbSHeiko Carstens 
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
/* autoload on access to the /dev/kvm misc device node */
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
5741