// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/pgtable.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include <asm/uv.h>
#include <asm/fpu/api.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

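/*
 * Descriptors for the VM statistics reported through the binary stats
 * interface; one entry per u64 counter in struct kvm_vm_stat (checked by
 * the static_assert below).
 */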
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, inject_io),
	STATS_DESC_COUNTER(VM, inject_float_mchk),
	STATS_DESC_COUNTER(VM, inject_pfault_done),
	STATS_DESC_COUNTER(VM, inject_service_signal),
	STATS_DESC_COUNTER(VM, inject_virtio)
};
static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
		sizeof(struct kvm_vm_stat) / sizeof(u64));

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, exit_userspace),
	STATS_DESC_COUNTER(VCPU, exit_null),
	STATS_DESC_COUNTER(VCPU, exit_external_request),
	STATS_DESC_COUNTER(VCPU, exit_io_request),
	STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
	STATS_DESC_COUNTER(VCPU, exit_stop_request),
	STATS_DESC_COUNTER(VCPU, exit_validity),
	STATS_DESC_COUNTER(VCPU, exit_instruction),
	STATS_DESC_COUNTER(VCPU, exit_pei),
	STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
	STATS_DESC_COUNTER(VCPU, instruction_lctl),
	STATS_DESC_COUNTER(VCPU, instruction_lctlg),
	STATS_DESC_COUNTER(VCPU, instruction_stctl),
	STATS_DESC_COUNTER(VCPU, instruction_stctg),
	STATS_DESC_COUNTER(VCPU, exit_program_interruption),
	STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
	STATS_DESC_COUNTER(VCPU, exit_operation_exception),
	STATS_DESC_COUNTER(VCPU, deliver_ckc),
	STATS_DESC_COUNTER(VCPU, deliver_cputm),
	STATS_DESC_COUNTER(VCPU, deliver_external_call),
	STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
	STATS_DESC_COUNTER(VCPU, deliver_service_signal),
	STATS_DESC_COUNTER(VCPU, deliver_virtio),
	STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
	STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
	STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
	STATS_DESC_COUNTER(VCPU, deliver_program),
	STATS_DESC_COUNTER(VCPU, deliver_io),
	STATS_DESC_COUNTER(VCPU, deliver_machine_check),
	STATS_DESC_COUNTER(VCPU, exit_wait_state),
	STATS_DESC_COUNTER(VCPU, inject_ckc),
	STATS_DESC_COUNTER(VCPU, inject_cputm),
	STATS_DESC_COUNTER(VCPU, inject_external_call),
	STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
	STATS_DESC_COUNTER(VCPU, inject_mchk),
	STATS_DESC_COUNTER(VCPU, inject_pfault_init),
	STATS_DESC_COUNTER(VCPU, inject_program),
	STATS_DESC_COUNTER(VCPU, inject_restart),
	STATS_DESC_COUNTER(VCPU, inject_set_prefix),
	STATS_DESC_COUNTER(VCPU, inject_stop_signal),
	STATS_DESC_COUNTER(VCPU, instruction_epsw),
	STATS_DESC_COUNTER(VCPU, instruction_gs),
	STATS_DESC_COUNTER(VCPU, instruction_io_other),
	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
	STATS_DESC_COUNTER(VCPU, instruction_ptff),
	STATS_DESC_COUNTER(VCPU, instruction_sck),
	STATS_DESC_COUNTER(VCPU, instruction_sckpf),
	STATS_DESC_COUNTER(VCPU, instruction_stidp),
	STATS_DESC_COUNTER(VCPU, instruction_spx),
	STATS_DESC_COUNTER(VCPU, instruction_stpx),
	STATS_DESC_COUNTER(VCPU, instruction_stap),
	STATS_DESC_COUNTER(VCPU, instruction_iske),
	STATS_DESC_COUNTER(VCPU, instruction_ri),
	STATS_DESC_COUNTER(VCPU, instruction_rrbe),
	STATS_DESC_COUNTER(VCPU, instruction_sske),
	STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
	STATS_DESC_COUNTER(VCPU, instruction_stsi),
	STATS_DESC_COUNTER(VCPU, instruction_stfl),
	STATS_DESC_COUNTER(VCPU, instruction_tb),
	STATS_DESC_COUNTER(VCPU, instruction_tpi),
	STATS_DESC_COUNTER(VCPU, instruction_tprot),
	STATS_DESC_COUNTER(VCPU, instruction_tsch),
	STATS_DESC_COUNTER(VCPU, instruction_sie),
	STATS_DESC_COUNTER(VCPU, instruction_essa),
	STATS_DESC_COUNTER(VCPU, instruction_sthyi),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
	STATS_DESC_COUNTER(VCPU, diagnose_10),
	STATS_DESC_COUNTER(VCPU, diagnose_44),
	STATS_DESC_COUNTER(VCPU, diagnose_9c),
	STATS_DESC_COUNTER(VCPU, diagnose_9c_ignored),
	STATS_DESC_COUNTER(VCPU, diagnose_9c_forward),
	STATS_DESC_COUNTER(VCPU, diagnose_258),
	STATS_DESC_COUNTER(VCPU, diagnose_308),
	STATS_DESC_COUNTER(VCPU, diagnose_500),
	STATS_DESC_COUNTER(VCPU, diagnose_other),
	STATS_DESC_COUNTER(VCPU, pfault_sync)
};
static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
		sizeof(struct kvm_vcpu_stat) / sizeof(u64));

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

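/*
 * debugfs counterparts of the counters above, used by the common KVM code
 * to create the per-stat debugfs files.
 */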
struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT("userspace_handled", exit_userspace),
	VCPU_STAT("exit_null", exit_null),
	VCPU_STAT("pfault_sync", pfault_sync),
	VCPU_STAT("exit_validity", exit_validity),
	VCPU_STAT("exit_stop_request", exit_stop_request),
	VCPU_STAT("exit_external_request", exit_external_request),
	VCPU_STAT("exit_io_request", exit_io_request),
	VCPU_STAT("exit_external_interrupt", exit_external_interrupt),
	VCPU_STAT("exit_instruction", exit_instruction),
	VCPU_STAT("exit_pei", exit_pei),
	VCPU_STAT("exit_program_interruption", exit_program_interruption),
	VCPU_STAT("exit_instr_and_program_int", exit_instr_and_program),
	VCPU_STAT("exit_operation_exception", exit_operation_exception),
	VCPU_STAT("halt_successful_poll", generic.halt_successful_poll),
	VCPU_STAT("halt_attempted_poll", generic.halt_attempted_poll),
	VCPU_STAT("halt_poll_invalid", generic.halt_poll_invalid),
	VCPU_STAT("halt_no_poll_steal", halt_no_poll_steal),
	VCPU_STAT("halt_wakeup", generic.halt_wakeup),
	VCPU_STAT("halt_poll_success_ns", generic.halt_poll_success_ns),
	VCPU_STAT("halt_poll_fail_ns", generic.halt_poll_fail_ns),
	VCPU_STAT("instruction_lctlg", instruction_lctlg),
	VCPU_STAT("instruction_lctl", instruction_lctl),
	VCPU_STAT("instruction_stctl", instruction_stctl),
	VCPU_STAT("instruction_stctg", instruction_stctg),
	VCPU_STAT("deliver_ckc", deliver_ckc),
	VCPU_STAT("deliver_cputm", deliver_cputm),
	VCPU_STAT("deliver_emergency_signal", deliver_emergency_signal),
	VCPU_STAT("deliver_external_call", deliver_external_call),
	VCPU_STAT("deliver_service_signal", deliver_service_signal),
	VCPU_STAT("deliver_virtio", deliver_virtio),
	VCPU_STAT("deliver_stop_signal", deliver_stop_signal),
	VCPU_STAT("deliver_prefix_signal", deliver_prefix_signal),
	VCPU_STAT("deliver_restart_signal", deliver_restart_signal),
	VCPU_STAT("deliver_program", deliver_program),
	VCPU_STAT("deliver_io", deliver_io),
	VCPU_STAT("deliver_machine_check", deliver_machine_check),
	VCPU_STAT("exit_wait_state", exit_wait_state),
	VCPU_STAT("inject_ckc", inject_ckc),
	VCPU_STAT("inject_cputm", inject_cputm),
	VCPU_STAT("inject_external_call", inject_external_call),
	VM_STAT("inject_float_mchk", inject_float_mchk),
	VCPU_STAT("inject_emergency_signal", inject_emergency_signal),
	VM_STAT("inject_io", inject_io),
	VCPU_STAT("inject_mchk", inject_mchk),
	VM_STAT("inject_pfault_done", inject_pfault_done),
	VCPU_STAT("inject_program", inject_program),
	VCPU_STAT("inject_restart", inject_restart),
	VM_STAT("inject_service_signal", inject_service_signal),
	VCPU_STAT("inject_set_prefix", inject_set_prefix),
	VCPU_STAT("inject_stop_signal", inject_stop_signal),
	VCPU_STAT("inject_pfault_init", inject_pfault_init),
	VM_STAT("inject_virtio", inject_virtio),
	VCPU_STAT("instruction_epsw", instruction_epsw),
	VCPU_STAT("instruction_gs", instruction_gs),
	VCPU_STAT("instruction_io_other", instruction_io_other),
	VCPU_STAT("instruction_lpsw", instruction_lpsw),
	VCPU_STAT("instruction_lpswe", instruction_lpswe),
	VCPU_STAT("instruction_pfmf", instruction_pfmf),
	VCPU_STAT("instruction_ptff", instruction_ptff),
	VCPU_STAT("instruction_stidp", instruction_stidp),
	VCPU_STAT("instruction_sck", instruction_sck),
	VCPU_STAT("instruction_sckpf", instruction_sckpf),
	VCPU_STAT("instruction_spx", instruction_spx),
	VCPU_STAT("instruction_stpx", instruction_stpx),
	VCPU_STAT("instruction_stap", instruction_stap),
	VCPU_STAT("instruction_iske", instruction_iske),
	VCPU_STAT("instruction_ri", instruction_ri),
	VCPU_STAT("instruction_rrbe", instruction_rrbe),
	VCPU_STAT("instruction_sske", instruction_sske),
	VCPU_STAT("instruction_ipte_interlock", instruction_ipte_interlock),
	VCPU_STAT("instruction_essa", instruction_essa),
	VCPU_STAT("instruction_stsi", instruction_stsi),
	VCPU_STAT("instruction_stfl", instruction_stfl),
	VCPU_STAT("instruction_tb", instruction_tb),
	VCPU_STAT("instruction_tpi", instruction_tpi),
	VCPU_STAT("instruction_tprot", instruction_tprot),
	VCPU_STAT("instruction_tsch", instruction_tsch),
	VCPU_STAT("instruction_sthyi", instruction_sthyi),
	VCPU_STAT("instruction_sie", instruction_sie),
	VCPU_STAT("instruction_sigp_sense", instruction_sigp_sense),
	VCPU_STAT("instruction_sigp_sense_running", instruction_sigp_sense_running),
	VCPU_STAT("instruction_sigp_external_call", instruction_sigp_external_call),
	VCPU_STAT("instruction_sigp_emergency", instruction_sigp_emergency),
	VCPU_STAT("instruction_sigp_cond_emergency", instruction_sigp_cond_emergency),
	VCPU_STAT("instruction_sigp_start", instruction_sigp_start),
	VCPU_STAT("instruction_sigp_stop", instruction_sigp_stop),
	VCPU_STAT("instruction_sigp_stop_store_status", instruction_sigp_stop_store_status),
	VCPU_STAT("instruction_sigp_store_status", instruction_sigp_store_status),
	VCPU_STAT("instruction_sigp_store_adtl_status", instruction_sigp_store_adtl_status),
	VCPU_STAT("instruction_sigp_set_arch", instruction_sigp_arch),
	VCPU_STAT("instruction_sigp_set_prefix", instruction_sigp_prefix),
	VCPU_STAT("instruction_sigp_restart", instruction_sigp_restart),
	VCPU_STAT("instruction_sigp_cpu_reset", instruction_sigp_cpu_reset),
	VCPU_STAT("instruction_sigp_init_cpu_reset", instruction_sigp_init_cpu_reset),
	VCPU_STAT("instruction_sigp_unknown", instruction_sigp_unknown),
	VCPU_STAT("instruction_diag_10", diagnose_10),
	VCPU_STAT("instruction_diag_44", diagnose_44),
	VCPU_STAT("instruction_diag_9c", diagnose_9c),
	VCPU_STAT("diag_9c_ignored", diagnose_9c_ignored),
	VCPU_STAT("diag_9c_forward", diagnose_9c_forward),
	VCPU_STAT("instruction_diag_258", diagnose_258),
	VCPU_STAT("instruction_diag_308", diagnose_308),
	VCPU_STAT("instruction_diag_500", diagnose_500),
	VCPU_STAT("instruction_diag_other", diagnose_other),
	{ NULL }
};

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling.  >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");

/* maximum diag9c forwarding per second */
unsigned int diag9c_forwarding_hz;
module_param(diag9c_forwarding_hz, uint, 0644);
MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go
 * beyond this, it requires code changes, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		sizeof(S390_lowcore.stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}

/* forward declarations */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);
static int sca_switch_to_extended(struct kvm *kvm);

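/*
 * Adjust the epoch (and, with the multiple-epoch facility, the epoch index)
 * of a SIE control block after the host TOD clock changed by @delta, so
 * that the guest's view of the TOD clock stays stable.
 */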
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD clock jumped by delta; we have to compensate for this by
	 * adding -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}
/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void *opaque)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

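/* Return 1 if the PLO subfunction @nr is installed ("test bit" query, cc 0). */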
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

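/*
 * Execute the query subfunction (function code 0 in GR0) of the given
 * 32-bit opcode and store the resulting parameter block at @query.
 */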
static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
	register unsigned long r0 asm("0") = 0;	/* query function */
	register unsigned long r1 asm("1") = (unsigned long) query;

	asm volatile(
		/* Parameter regs are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		:
		: "d" (r0), "a" (r1), [opc] "i" (opcode)
		: "cc", "memory");
}

#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939

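/*
 * Probe which PLO, PTFF, CPACF, SORTL and DFLTCC subfunctions and which
 * SIE related facilities are installed on this host, and record them in
 * kvm_s390_available_subfunc and kvm_s390_available_cpu_feat.
 */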
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will cause
	 * pages to be detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf_uv)
		goto out;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
		goto out;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto out;
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto out;

	return 0;

out:
	kvm_arch_exit();
	return rc;
}

void kvm_arch_exit(void)
{
	kvm_s390_gib_destroy();
	debug_unregister(kvm_s390_dbf);
	debug_unregister(kvm_s390_dbf_uv);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_S390_DIAG318:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		r = KVM_GUESTDBG_VALID_MASK;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	case KVM_CAP_S390_PROTECTED:
		r = is_prot_virt_host();
		break;
	default:
		r = 0;
	}
	return r;
}

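/*
 * Walk the memslot one segment (_PAGE_ENTRIES pages) at a time, collect the
 * dirty bits from the gmap and transfer them into the memslot's dirty bitmap.
 */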
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

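/* Request interception of operation exceptions on all vcpus. */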
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			mmap_write_lock(kvm->mm);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			mmap_write_unlock(kvm->mm);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid the hardware working on
			 * stale PGSTEs, we emulate these instructions.
			 */
908a4499382SJanosch Frank 			kvm->arch.use_skf = 0;
909a4499382SJanosch Frank 			kvm->arch.use_pfmfi = 0;
910a4499382SJanosch Frank 		}
911a4499382SJanosch Frank 		mutex_unlock(&kvm->lock);
912a4499382SJanosch Frank 		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
913a4499382SJanosch Frank 			 r ? "(not available)" : "(success)");
914a4499382SJanosch Frank 		break;
915e44fc8c9SEkaterina Tumanova 	case KVM_CAP_S390_USER_STSI:
916c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
917e44fc8c9SEkaterina Tumanova 		kvm->arch.user_stsi = 1;
918e44fc8c9SEkaterina Tumanova 		r = 0;
919e44fc8c9SEkaterina Tumanova 		break;
9206502a34cSDavid Hildenbrand 	case KVM_CAP_S390_USER_INSTR0:
9216502a34cSDavid Hildenbrand 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
9226502a34cSDavid Hildenbrand 		kvm->arch.user_instr0 = 1;
9236502a34cSDavid Hildenbrand 		icpt_operexc_on_all_vcpus(kvm);
9246502a34cSDavid Hildenbrand 		r = 0;
9256502a34cSDavid Hildenbrand 		break;
926d938dc55SCornelia Huck 	default:
927d938dc55SCornelia Huck 		r = -EINVAL;
928d938dc55SCornelia Huck 		break;
929d938dc55SCornelia Huck 	}
930d938dc55SCornelia Huck 	return r;
931d938dc55SCornelia Huck }
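
/*
 * Illustrative sketch, not part of the original source: the capabilities
 * handled above are enabled from userspace with the KVM_ENABLE_CAP vm ioctl
 * on an already open VM file descriptor (here assumed to be "vm_fd");
 * includes and error handling are omitted:
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_USER_STSI };
 *
 *	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap))
 *		perror("KVM_ENABLE_CAP");
 *
 * Capabilities that change facility bits (RI, AIS, GS, HPAGE_1M) fail with
 * -EBUSY once kvm->created_vcpus is non-zero, as checked above.
 */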
932d938dc55SCornelia Huck 
9338c0a7ce6SDominik Dingel static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
9348c0a7ce6SDominik Dingel {
9358c0a7ce6SDominik Dingel 	int ret;
9368c0a7ce6SDominik Dingel 
9378c0a7ce6SDominik Dingel 	switch (attr->attr) {
9388c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE:
9398c0a7ce6SDominik Dingel 		ret = 0;
940c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
941a3a92c31SDominik Dingel 			 kvm->arch.mem_limit);
942a3a92c31SDominik Dingel 		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
9438c0a7ce6SDominik Dingel 			ret = -EFAULT;
9448c0a7ce6SDominik Dingel 		break;
9458c0a7ce6SDominik Dingel 	default:
9468c0a7ce6SDominik Dingel 		ret = -ENXIO;
9478c0a7ce6SDominik Dingel 		break;
9488c0a7ce6SDominik Dingel 	}
9498c0a7ce6SDominik Dingel 	return ret;
9508c0a7ce6SDominik Dingel }
9518c0a7ce6SDominik Dingel 
9528c0a7ce6SDominik Dingel static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
9534f718eabSDominik Dingel {
9544f718eabSDominik Dingel 	int ret;
9554f718eabSDominik Dingel 	unsigned int idx;
9564f718eabSDominik Dingel 	switch (attr->attr) {
9574f718eabSDominik Dingel 	case KVM_S390_VM_MEM_ENABLE_CMMA:
958f9cbd9b0SDavid Hildenbrand 		ret = -ENXIO;
959c24cc9c8SDavid Hildenbrand 		if (!sclp.has_cmma)
960e6db1d61SDominik Dingel 			break;
961e6db1d61SDominik Dingel 
962c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
9634f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
964a4499382SJanosch Frank 		if (kvm->created_vcpus)
965a4499382SJanosch Frank 			ret = -EBUSY;
966a4499382SJanosch Frank 		else if (kvm->mm->context.allow_gmap_hpage_1m)
967a4499382SJanosch Frank 			ret = -EINVAL;
968a4499382SJanosch Frank 		else {
9694f718eabSDominik Dingel 			kvm->arch.use_cmma = 1;
970c9f0a2b8SJanosch Frank 			/* PFMF interpretation is not compatible with cmma. */
971c9f0a2b8SJanosch Frank 			kvm->arch.use_pfmfi = 0;
9724f718eabSDominik Dingel 			ret = 0;
9734f718eabSDominik Dingel 		}
9744f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
9754f718eabSDominik Dingel 		break;
9764f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CLR_CMMA:
977f9cbd9b0SDavid Hildenbrand 		ret = -ENXIO;
978f9cbd9b0SDavid Hildenbrand 		if (!sclp.has_cmma)
979f9cbd9b0SDavid Hildenbrand 			break;
980c3489155SDominik Dingel 		ret = -EINVAL;
981c3489155SDominik Dingel 		if (!kvm->arch.use_cmma)
982c3489155SDominik Dingel 			break;
983c3489155SDominik Dingel 
984c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
9854f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
9864f718eabSDominik Dingel 		idx = srcu_read_lock(&kvm->srcu);
987a13cff31SDominik Dingel 		s390_reset_cmma(kvm->arch.gmap->mm);
9884f718eabSDominik Dingel 		srcu_read_unlock(&kvm->srcu, idx);
9894f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
9904f718eabSDominik Dingel 		ret = 0;
9914f718eabSDominik Dingel 		break;
9928c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE: {
9938c0a7ce6SDominik Dingel 		unsigned long new_limit;
9948c0a7ce6SDominik Dingel 
9958c0a7ce6SDominik Dingel 		if (kvm_is_ucontrol(kvm))
9968c0a7ce6SDominik Dingel 			return -EINVAL;
9978c0a7ce6SDominik Dingel 
9988c0a7ce6SDominik Dingel 		if (get_user(new_limit, (u64 __user *)attr->addr))
9998c0a7ce6SDominik Dingel 			return -EFAULT;
10008c0a7ce6SDominik Dingel 
1001a3a92c31SDominik Dingel 		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
1002a3a92c31SDominik Dingel 		    new_limit > kvm->arch.mem_limit)
10038c0a7ce6SDominik Dingel 			return -E2BIG;
10048c0a7ce6SDominik Dingel 
1005a3a92c31SDominik Dingel 		if (!new_limit)
1006a3a92c31SDominik Dingel 			return -EINVAL;
1007a3a92c31SDominik Dingel 
10086ea427bbSMartin Schwidefsky 		/* gmap_create takes the last usable address */
1009a3a92c31SDominik Dingel 		if (new_limit != KVM_S390_NO_MEM_LIMIT)
1010a3a92c31SDominik Dingel 			new_limit -= 1;
1011a3a92c31SDominik Dingel 
10128c0a7ce6SDominik Dingel 		ret = -EBUSY;
10138c0a7ce6SDominik Dingel 		mutex_lock(&kvm->lock);
1014a03825bbSPaolo Bonzini 		if (!kvm->created_vcpus) {
10156ea427bbSMartin Schwidefsky 			/* gmap_create will round the limit up */
10166ea427bbSMartin Schwidefsky 			struct gmap *new = gmap_create(current->mm, new_limit);
10178c0a7ce6SDominik Dingel 
10188c0a7ce6SDominik Dingel 			if (!new) {
10198c0a7ce6SDominik Dingel 				ret = -ENOMEM;
10208c0a7ce6SDominik Dingel 			} else {
10216ea427bbSMartin Schwidefsky 				gmap_remove(kvm->arch.gmap);
10228c0a7ce6SDominik Dingel 				new->private = kvm;
10238c0a7ce6SDominik Dingel 				kvm->arch.gmap = new;
10248c0a7ce6SDominik Dingel 				ret = 0;
10258c0a7ce6SDominik Dingel 			}
10268c0a7ce6SDominik Dingel 		}
10278c0a7ce6SDominik Dingel 		mutex_unlock(&kvm->lock);
1028a3a92c31SDominik Dingel 		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
1029a3a92c31SDominik Dingel 		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
1030a3a92c31SDominik Dingel 			 (void *) kvm->arch.gmap->asce);
10318c0a7ce6SDominik Dingel 		break;
10328c0a7ce6SDominik Dingel 	}
10334f718eabSDominik Dingel 	default:
10344f718eabSDominik Dingel 		ret = -ENXIO;
10354f718eabSDominik Dingel 		break;
10364f718eabSDominik Dingel 	}
10374f718eabSDominik Dingel 	return ret;
10384f718eabSDominik Dingel }
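
/*
 * Hedged usage sketch, not taken from this file: the memory control
 * attributes above are reached through the KVM_SET_DEVICE_ATTR and
 * KVM_GET_DEVICE_ATTR vm ioctls. Assuming an already open VM fd "vm_fd",
 * enabling CMMA (only possible before the first vCPU is created) and then
 * querying the memory limit could look like this:
 *
 *	__u64 limit;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MEM_CTRL,
 *		.attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 *	attr.attr = KVM_S390_VM_MEM_LIMIT_SIZE;
 *	attr.addr = (__u64)(unsigned long)&limit;
 *	ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
 *
 * The limit read back is the value stored via put_user() in
 * kvm_s390_get_mem_control() above.
 */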
10394f718eabSDominik Dingel 
1040a374e892STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
1041a374e892STony Krowiak 
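/*
 * Re-apply the VM-wide crypto settings to every vCPU: all vCPUs are blocked
 * while kvm_s390_vcpu_crypto_setup() rebuilds their crypto configuration, and
 * a KVM_REQ_VSIE_RESTART request makes nested (VSIE) guests leave the VSIE
 * handler so that the shadow crycb is recreated with the new settings.
 */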
104220c922f0STony Krowiak void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
1043a374e892STony Krowiak {
1044a374e892STony Krowiak 	struct kvm_vcpu *vcpu;
1045a374e892STony Krowiak 	int i;
1046a374e892STony Krowiak 
104720c922f0STony Krowiak 	kvm_s390_vcpu_block_all(kvm);
104820c922f0STony Krowiak 
10493194cdb7SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
105020c922f0STony Krowiak 		kvm_s390_vcpu_crypto_setup(vcpu);
10513194cdb7SDavid Hildenbrand 		/* recreate the shadow crycb by leaving the VSIE handler */
10523194cdb7SDavid Hildenbrand 		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
10533194cdb7SDavid Hildenbrand 	}
105420c922f0STony Krowiak 
105520c922f0STony Krowiak 	kvm_s390_vcpu_unblock_all(kvm);
105620c922f0STony Krowiak }
105720c922f0STony Krowiak 
105820c922f0STony Krowiak static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
105920c922f0STony Krowiak {
1060a374e892STony Krowiak 	mutex_lock(&kvm->lock);
1061a374e892STony Krowiak 	switch (attr->attr) {
1062a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
10638e41bd54SChristian Borntraeger 		if (!test_kvm_facility(kvm, 76)) {
10648e41bd54SChristian Borntraeger 			mutex_unlock(&kvm->lock);
106537940fb0STony Krowiak 			return -EINVAL;
10668e41bd54SChristian Borntraeger 		}
1067a374e892STony Krowiak 		get_random_bytes(
1068a374e892STony Krowiak 			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1069a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1070a374e892STony Krowiak 		kvm->arch.crypto.aes_kw = 1;
1071c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
1072a374e892STony Krowiak 		break;
1073a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
10748e41bd54SChristian Borntraeger 		if (!test_kvm_facility(kvm, 76)) {
10758e41bd54SChristian Borntraeger 			mutex_unlock(&kvm->lock);
107637940fb0STony Krowiak 			return -EINVAL;
10778e41bd54SChristian Borntraeger 		}
1078a374e892STony Krowiak 		get_random_bytes(
1079a374e892STony Krowiak 			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1080a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
1081a374e892STony Krowiak 		kvm->arch.crypto.dea_kw = 1;
1082c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
1083a374e892STony Krowiak 		break;
1084a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
10858e41bd54SChristian Borntraeger 		if (!test_kvm_facility(kvm, 76)) {
10868e41bd54SChristian Borntraeger 			mutex_unlock(&kvm->lock);
108737940fb0STony Krowiak 			return -EINVAL;
10888e41bd54SChristian Borntraeger 		}
1089a374e892STony Krowiak 		kvm->arch.crypto.aes_kw = 0;
1090a374e892STony Krowiak 		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
1091a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1092c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
1093a374e892STony Krowiak 		break;
1094a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
10958e41bd54SChristian Borntraeger 		if (!test_kvm_facility(kvm, 76)) {
10968e41bd54SChristian Borntraeger 			mutex_unlock(&kvm->lock);
109737940fb0STony Krowiak 			return -EINVAL;
10988e41bd54SChristian Borntraeger 		}
1099a374e892STony Krowiak 		kvm->arch.crypto.dea_kw = 0;
1100a374e892STony Krowiak 		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
1101a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
1102c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
1103a374e892STony Krowiak 		break;
110437940fb0STony Krowiak 	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
110537940fb0STony Krowiak 		if (!ap_instructions_available()) {
110637940fb0STony Krowiak 			mutex_unlock(&kvm->lock);
110737940fb0STony Krowiak 			return -EOPNOTSUPP;
110837940fb0STony Krowiak 		}
110937940fb0STony Krowiak 		kvm->arch.crypto.apie = 1;
111037940fb0STony Krowiak 		break;
111137940fb0STony Krowiak 	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
111237940fb0STony Krowiak 		if (!ap_instructions_available()) {
111337940fb0STony Krowiak 			mutex_unlock(&kvm->lock);
111437940fb0STony Krowiak 			return -EOPNOTSUPP;
111537940fb0STony Krowiak 		}
111637940fb0STony Krowiak 		kvm->arch.crypto.apie = 0;
111737940fb0STony Krowiak 		break;
1118a374e892STony Krowiak 	default:
1119a374e892STony Krowiak 		mutex_unlock(&kvm->lock);
1120a374e892STony Krowiak 		return -ENXIO;
1121a374e892STony Krowiak 	}
1122a374e892STony Krowiak 
112320c922f0STony Krowiak 	kvm_s390_vcpu_crypto_reset_all(kvm);
1124a374e892STony Krowiak 	mutex_unlock(&kvm->lock);
1125a374e892STony Krowiak 	return 0;
1126a374e892STony Krowiak }
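
/*
 * Illustrative sketch (assumption, not from this file): userspace toggles the
 * key wrapping attributes above with the KVM_SET_DEVICE_ATTR vm ioctl; no
 * payload is needed, the attribute id alone selects the action. "vm_fd" is an
 * assumed open VM fd:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_CRYPTO,
 *		.attr  = KVM_S390_VM_CRYPTO_ENABLE_AES_KW,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr))
 *		perror("enable AES key wrapping");
 *
 * On success a fresh random AES wrapping key mask has been generated and all
 * vCPUs have been reset through kvm_s390_vcpu_crypto_reset_all().
 */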
1127a374e892STony Krowiak 
1128190df4a2SClaudio Imbrenda static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
1129190df4a2SClaudio Imbrenda {
1130190df4a2SClaudio Imbrenda 	int cx;
1131190df4a2SClaudio Imbrenda 	struct kvm_vcpu *vcpu;
1132190df4a2SClaudio Imbrenda 
1133190df4a2SClaudio Imbrenda 	kvm_for_each_vcpu(cx, vcpu, kvm)
1134190df4a2SClaudio Imbrenda 		kvm_s390_sync_request(req, vcpu);
1135190df4a2SClaudio Imbrenda }
1136190df4a2SClaudio Imbrenda 
1137190df4a2SClaudio Imbrenda /*
1138190df4a2SClaudio Imbrenda  * Must be called with kvm->srcu held to avoid races on memslots, and with
11391de1ea7eSChristian Borntraeger  * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
1140190df4a2SClaudio Imbrenda  */
1141190df4a2SClaudio Imbrenda static int kvm_s390_vm_start_migration(struct kvm *kvm)
1142190df4a2SClaudio Imbrenda {
1143190df4a2SClaudio Imbrenda 	struct kvm_memory_slot *ms;
1144190df4a2SClaudio Imbrenda 	struct kvm_memslots *slots;
1145afdad616SClaudio Imbrenda 	unsigned long ram_pages = 0;
1146190df4a2SClaudio Imbrenda 	int slotnr;
1147190df4a2SClaudio Imbrenda 
1148190df4a2SClaudio Imbrenda 	/* migration mode already enabled */
1149afdad616SClaudio Imbrenda 	if (kvm->arch.migration_mode)
1150190df4a2SClaudio Imbrenda 		return 0;
1151190df4a2SClaudio Imbrenda 	slots = kvm_memslots(kvm);
1152190df4a2SClaudio Imbrenda 	if (!slots || !slots->used_slots)
1153190df4a2SClaudio Imbrenda 		return -EINVAL;
1154190df4a2SClaudio Imbrenda 
1155afdad616SClaudio Imbrenda 	if (!kvm->arch.use_cmma) {
1156afdad616SClaudio Imbrenda 		kvm->arch.migration_mode = 1;
1157afdad616SClaudio Imbrenda 		return 0;
1158190df4a2SClaudio Imbrenda 	}
1159190df4a2SClaudio Imbrenda 	/* mark all the pages in active slots as dirty */
1160190df4a2SClaudio Imbrenda 	for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
1161190df4a2SClaudio Imbrenda 		ms = slots->memslots + slotnr;
116213a17cc0SIgor Mammedov 		if (!ms->dirty_bitmap)
116313a17cc0SIgor Mammedov 			return -EINVAL;
1164afdad616SClaudio Imbrenda 		/*
1165afdad616SClaudio Imbrenda 		 * The second half of the bitmap is only used on x86,
1166afdad616SClaudio Imbrenda 		 * and would be wasted otherwise, so we put it to good
1167afdad616SClaudio Imbrenda 		 * use here to keep track of the state of the storage
1168afdad616SClaudio Imbrenda 		 * attributes.
1169afdad616SClaudio Imbrenda 		 */
1170afdad616SClaudio Imbrenda 		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
1171afdad616SClaudio Imbrenda 		ram_pages += ms->npages;
1172190df4a2SClaudio Imbrenda 	}
1173afdad616SClaudio Imbrenda 	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
1174afdad616SClaudio Imbrenda 	kvm->arch.migration_mode = 1;
1175190df4a2SClaudio Imbrenda 	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
1176190df4a2SClaudio Imbrenda 	return 0;
1177190df4a2SClaudio Imbrenda }
1178190df4a2SClaudio Imbrenda 
1179190df4a2SClaudio Imbrenda /*
11801de1ea7eSChristian Borntraeger  * Must be called with kvm->slots_lock to avoid races with ourselves and
1181190df4a2SClaudio Imbrenda  * kvm_s390_vm_start_migration.
1182190df4a2SClaudio Imbrenda  */
1183190df4a2SClaudio Imbrenda static int kvm_s390_vm_stop_migration(struct kvm *kvm)
1184190df4a2SClaudio Imbrenda {
1185190df4a2SClaudio Imbrenda 	/* migration mode already disabled */
1186afdad616SClaudio Imbrenda 	if (!kvm->arch.migration_mode)
1187190df4a2SClaudio Imbrenda 		return 0;
1188afdad616SClaudio Imbrenda 	kvm->arch.migration_mode = 0;
1189afdad616SClaudio Imbrenda 	if (kvm->arch.use_cmma)
1190190df4a2SClaudio Imbrenda 		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
1191190df4a2SClaudio Imbrenda 	return 0;
1192190df4a2SClaudio Imbrenda }
1193190df4a2SClaudio Imbrenda 
1194190df4a2SClaudio Imbrenda static int kvm_s390_vm_set_migration(struct kvm *kvm,
1195190df4a2SClaudio Imbrenda 				     struct kvm_device_attr *attr)
1196190df4a2SClaudio Imbrenda {
11971de1ea7eSChristian Borntraeger 	int res = -ENXIO;
1198190df4a2SClaudio Imbrenda 
11991de1ea7eSChristian Borntraeger 	mutex_lock(&kvm->slots_lock);
1200190df4a2SClaudio Imbrenda 	switch (attr->attr) {
1201190df4a2SClaudio Imbrenda 	case KVM_S390_VM_MIGRATION_START:
1202190df4a2SClaudio Imbrenda 		res = kvm_s390_vm_start_migration(kvm);
1203190df4a2SClaudio Imbrenda 		break;
1204190df4a2SClaudio Imbrenda 	case KVM_S390_VM_MIGRATION_STOP:
1205190df4a2SClaudio Imbrenda 		res = kvm_s390_vm_stop_migration(kvm);
1206190df4a2SClaudio Imbrenda 		break;
1207190df4a2SClaudio Imbrenda 	default:
1208190df4a2SClaudio Imbrenda 		break;
1209190df4a2SClaudio Imbrenda 	}
12101de1ea7eSChristian Borntraeger 	mutex_unlock(&kvm->slots_lock);
1211190df4a2SClaudio Imbrenda 
1212190df4a2SClaudio Imbrenda 	return res;
1213190df4a2SClaudio Imbrenda }
1214190df4a2SClaudio Imbrenda 
1215190df4a2SClaudio Imbrenda static int kvm_s390_vm_get_migration(struct kvm *kvm,
1216190df4a2SClaudio Imbrenda 				     struct kvm_device_attr *attr)
1217190df4a2SClaudio Imbrenda {
1218afdad616SClaudio Imbrenda 	u64 mig = kvm->arch.migration_mode;
1219190df4a2SClaudio Imbrenda 
1220190df4a2SClaudio Imbrenda 	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
1221190df4a2SClaudio Imbrenda 		return -ENXIO;
1222190df4a2SClaudio Imbrenda 
1223190df4a2SClaudio Imbrenda 	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
1224190df4a2SClaudio Imbrenda 		return -EFAULT;
1225190df4a2SClaudio Imbrenda 	return 0;
1226190df4a2SClaudio Imbrenda }
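
/*
 * Hedged usage sketch, not part of the original source: migration mode is
 * toggled and queried through the same device attribute interface, here with
 * an assumed open VM fd "vm_fd":
 *
 *	__u64 mig_state;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MIGRATION,
 *		.attr  = KVM_S390_VM_MIGRATION_START,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 *	attr.attr = KVM_S390_VM_MIGRATION_STATUS;
 *	attr.addr = (__u64)(unsigned long)&mig_state;
 *	ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
 *
 * With CMMA in use, starting migration marks all pages of all memslots as
 * dirty and seeds cmma_dirty_pages, as done in kvm_s390_vm_start_migration()
 * above.
 */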
1227190df4a2SClaudio Imbrenda 
12288fa1696eSCollin L. Walling static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
12298fa1696eSCollin L. Walling {
12308fa1696eSCollin L. Walling 	struct kvm_s390_vm_tod_clock gtod;
12318fa1696eSCollin L. Walling 
12328fa1696eSCollin L. Walling 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
12338fa1696eSCollin L. Walling 		return -EFAULT;
12348fa1696eSCollin L. Walling 
12350e7def5fSDavid Hildenbrand 	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
12368fa1696eSCollin L. Walling 		return -EINVAL;
12370e7def5fSDavid Hildenbrand 	kvm_s390_set_tod_clock(kvm, &gtod);
12388fa1696eSCollin L. Walling 
12398fa1696eSCollin L. Walling 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
12408fa1696eSCollin L. Walling 		gtod.epoch_idx, gtod.tod);
12418fa1696eSCollin L. Walling 
12428fa1696eSCollin L. Walling 	return 0;
12438fa1696eSCollin L. Walling }
12448fa1696eSCollin L. Walling 
124572f25020SJason J. Herne static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
124672f25020SJason J. Herne {
124772f25020SJason J. Herne 	u8 gtod_high;
124872f25020SJason J. Herne 
124972f25020SJason J. Herne 	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
125072f25020SJason J. Herne 					   sizeof(gtod_high)))
125172f25020SJason J. Herne 		return -EFAULT;
125272f25020SJason J. Herne 
125372f25020SJason J. Herne 	if (gtod_high != 0)
125472f25020SJason J. Herne 		return -EINVAL;
125558c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
125672f25020SJason J. Herne 
125772f25020SJason J. Herne 	return 0;
125872f25020SJason J. Herne }
125972f25020SJason J. Herne 
126072f25020SJason J. Herne static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
126172f25020SJason J. Herne {
12620e7def5fSDavid Hildenbrand 	struct kvm_s390_vm_tod_clock gtod = { 0 };
126372f25020SJason J. Herne 
12640e7def5fSDavid Hildenbrand 	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
12650e7def5fSDavid Hildenbrand 			   sizeof(gtod.tod)))
126672f25020SJason J. Herne 		return -EFAULT;
126772f25020SJason J. Herne 
12680e7def5fSDavid Hildenbrand 	kvm_s390_set_tod_clock(kvm, &gtod);
12690e7def5fSDavid Hildenbrand 	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
127072f25020SJason J. Herne 	return 0;
127172f25020SJason J. Herne }
127272f25020SJason J. Herne 
127372f25020SJason J. Herne static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
127472f25020SJason J. Herne {
127572f25020SJason J. Herne 	int ret;
127672f25020SJason J. Herne 
127772f25020SJason J. Herne 	if (attr->flags)
127872f25020SJason J. Herne 		return -EINVAL;
127972f25020SJason J. Herne 
128072f25020SJason J. Herne 	switch (attr->attr) {
12818fa1696eSCollin L. Walling 	case KVM_S390_VM_TOD_EXT:
12828fa1696eSCollin L. Walling 		ret = kvm_s390_set_tod_ext(kvm, attr);
12838fa1696eSCollin L. Walling 		break;
128472f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
128572f25020SJason J. Herne 		ret = kvm_s390_set_tod_high(kvm, attr);
128672f25020SJason J. Herne 		break;
128772f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
128872f25020SJason J. Herne 		ret = kvm_s390_set_tod_low(kvm, attr);
128972f25020SJason J. Herne 		break;
129072f25020SJason J. Herne 	default:
129172f25020SJason J. Herne 		ret = -ENXIO;
129272f25020SJason J. Herne 		break;
129372f25020SJason J. Herne 	}
129472f25020SJason J. Herne 	return ret;
129572f25020SJason J. Herne }
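
/*
 * Illustrative sketch (assumption, not from the original file): setting the
 * guest TOD base clock uses KVM_S390_VM_TOD_LOW with a plain u64 TOD value
 * (the value below is an arbitrary example), again via an assumed VM fd
 * "vm_fd":
 *
 *	__u64 tod = 0x8126d60e46000000ULL;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_TOD,
 *		.attr  = KVM_S390_VM_TOD_LOW,
 *		.addr  = (__u64)(unsigned long)&tod,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * Guests with facility 139 can use KVM_S390_VM_TOD_EXT instead, which also
 * carries the epoch index (see kvm_s390_set_tod_ext() above).
 */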
129672f25020SJason J. Herne 
129733d1b272SDavid Hildenbrand static void kvm_s390_get_tod_clock(struct kvm *kvm,
12988fa1696eSCollin L. Walling 				   struct kvm_s390_vm_tod_clock *gtod)
12998fa1696eSCollin L. Walling {
13002cfd7b73SHeiko Carstens 	union tod_clock clk;
13018fa1696eSCollin L. Walling 
13028fa1696eSCollin L. Walling 	preempt_disable();
13038fa1696eSCollin L. Walling 
13042cfd7b73SHeiko Carstens 	store_tod_clock_ext(&clk);
13058fa1696eSCollin L. Walling 
13062cfd7b73SHeiko Carstens 	gtod->tod = clk.tod + kvm->arch.epoch;
130733d1b272SDavid Hildenbrand 	gtod->epoch_idx = 0;
130833d1b272SDavid Hildenbrand 	if (test_kvm_facility(kvm, 139)) {
13092cfd7b73SHeiko Carstens 		gtod->epoch_idx = clk.ei + kvm->arch.epdx;
13102cfd7b73SHeiko Carstens 		if (gtod->tod < clk.tod)
13118fa1696eSCollin L. Walling 			gtod->epoch_idx += 1;
131233d1b272SDavid Hildenbrand 	}
13138fa1696eSCollin L. Walling 
13148fa1696eSCollin L. Walling 	preempt_enable();
13158fa1696eSCollin L. Walling }
13168fa1696eSCollin L. Walling 
13178fa1696eSCollin L. Walling static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
13188fa1696eSCollin L. Walling {
13198fa1696eSCollin L. Walling 	struct kvm_s390_vm_tod_clock gtod;
13208fa1696eSCollin L. Walling 
13218fa1696eSCollin L. Walling 	memset(&gtod, 0, sizeof(gtod));
132233d1b272SDavid Hildenbrand 	kvm_s390_get_tod_clock(kvm, &gtod);
13238fa1696eSCollin L. Walling 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
13248fa1696eSCollin L. Walling 		return -EFAULT;
13258fa1696eSCollin L. Walling 
13268fa1696eSCollin L. Walling 	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
13278fa1696eSCollin L. Walling 		gtod.epoch_idx, gtod.tod);
13288fa1696eSCollin L. Walling 	return 0;
13298fa1696eSCollin L. Walling }
13308fa1696eSCollin L. Walling 
133172f25020SJason J. Herne static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
133272f25020SJason J. Herne {
133372f25020SJason J. Herne 	u8 gtod_high = 0;
133472f25020SJason J. Herne 
133572f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod_high,
133672f25020SJason J. Herne 					 sizeof(gtod_high)))
133772f25020SJason J. Herne 		return -EFAULT;
133858c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
133972f25020SJason J. Herne 
134072f25020SJason J. Herne 	return 0;
134172f25020SJason J. Herne }
134272f25020SJason J. Herne 
134372f25020SJason J. Herne static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
134472f25020SJason J. Herne {
13455a3d883aSDavid Hildenbrand 	u64 gtod;
134672f25020SJason J. Herne 
134760417fccSDavid Hildenbrand 	gtod = kvm_s390_get_tod_clock_fast(kvm);
134872f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
134972f25020SJason J. Herne 		return -EFAULT;
135058c383c6SChristian Borntraeger 	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
135172f25020SJason J. Herne 
135272f25020SJason J. Herne 	return 0;
135372f25020SJason J. Herne }
135472f25020SJason J. Herne 
135572f25020SJason J. Herne static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
135672f25020SJason J. Herne {
135772f25020SJason J. Herne 	int ret;
135872f25020SJason J. Herne 
135972f25020SJason J. Herne 	if (attr->flags)
136072f25020SJason J. Herne 		return -EINVAL;
136172f25020SJason J. Herne 
136272f25020SJason J. Herne 	switch (attr->attr) {
13638fa1696eSCollin L. Walling 	case KVM_S390_VM_TOD_EXT:
13648fa1696eSCollin L. Walling 		ret = kvm_s390_get_tod_ext(kvm, attr);
13658fa1696eSCollin L. Walling 		break;
136672f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
136772f25020SJason J. Herne 		ret = kvm_s390_get_tod_high(kvm, attr);
136872f25020SJason J. Herne 		break;
136972f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
137072f25020SJason J. Herne 		ret = kvm_s390_get_tod_low(kvm, attr);
137172f25020SJason J. Herne 		break;
137272f25020SJason J. Herne 	default:
137372f25020SJason J. Herne 		ret = -ENXIO;
137472f25020SJason J. Herne 		break;
137572f25020SJason J. Herne 	}
137672f25020SJason J. Herne 	return ret;
137772f25020SJason J. Herne }
137872f25020SJason J. Herne 
1379658b6edaSMichael Mueller static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1380658b6edaSMichael Mueller {
1381658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_processor *proc;
1382053dd230SDavid Hildenbrand 	u16 lowest_ibc, unblocked_ibc;
1383658b6edaSMichael Mueller 	int ret = 0;
1384658b6edaSMichael Mueller 
1385658b6edaSMichael Mueller 	mutex_lock(&kvm->lock);
1386a03825bbSPaolo Bonzini 	if (kvm->created_vcpus) {
1387658b6edaSMichael Mueller 		ret = -EBUSY;
1388658b6edaSMichael Mueller 		goto out;
1389658b6edaSMichael Mueller 	}
1390c4196218SChristian Borntraeger 	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
1391658b6edaSMichael Mueller 	if (!proc) {
1392658b6edaSMichael Mueller 		ret = -ENOMEM;
1393658b6edaSMichael Mueller 		goto out;
1394658b6edaSMichael Mueller 	}
1395658b6edaSMichael Mueller 	if (!copy_from_user(proc, (void __user *)attr->addr,
1396658b6edaSMichael Mueller 			    sizeof(*proc))) {
13979bb0ec09SDavid Hildenbrand 		kvm->arch.model.cpuid = proc->cpuid;
1398053dd230SDavid Hildenbrand 		lowest_ibc = sclp.ibc >> 16 & 0xfff;
1399053dd230SDavid Hildenbrand 		unblocked_ibc = sclp.ibc & 0xfff;
14000487c44dSDavid Hildenbrand 		if (lowest_ibc && proc->ibc) {
1401053dd230SDavid Hildenbrand 			if (proc->ibc > unblocked_ibc)
1402053dd230SDavid Hildenbrand 				kvm->arch.model.ibc = unblocked_ibc;
1403053dd230SDavid Hildenbrand 			else if (proc->ibc < lowest_ibc)
1404053dd230SDavid Hildenbrand 				kvm->arch.model.ibc = lowest_ibc;
1405053dd230SDavid Hildenbrand 			else
1406658b6edaSMichael Mueller 				kvm->arch.model.ibc = proc->ibc;
1407053dd230SDavid Hildenbrand 		}
1408c54f0d6aSDavid Hildenbrand 		memcpy(kvm->arch.model.fac_list, proc->fac_list,
1409658b6edaSMichael Mueller 		       S390_ARCH_FAC_LIST_SIZE_BYTE);
1410a8c39dd7SChristian Borntraeger 		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1411a8c39dd7SChristian Borntraeger 			 kvm->arch.model.ibc,
1412a8c39dd7SChristian Borntraeger 			 kvm->arch.model.cpuid);
1413a8c39dd7SChristian Borntraeger 		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1414a8c39dd7SChristian Borntraeger 			 kvm->arch.model.fac_list[0],
1415a8c39dd7SChristian Borntraeger 			 kvm->arch.model.fac_list[1],
1416a8c39dd7SChristian Borntraeger 			 kvm->arch.model.fac_list[2]);
1417658b6edaSMichael Mueller 	} else
1418658b6edaSMichael Mueller 		ret = -EFAULT;
1419658b6edaSMichael Mueller 	kfree(proc);
1420658b6edaSMichael Mueller out:
1421658b6edaSMichael Mueller 	mutex_unlock(&kvm->lock);
1422658b6edaSMichael Mueller 	return ret;
1423658b6edaSMichael Mueller }
1424658b6edaSMichael Mueller 
142515c9705fSDavid Hildenbrand static int kvm_s390_set_processor_feat(struct kvm *kvm,
142615c9705fSDavid Hildenbrand 				       struct kvm_device_attr *attr)
142715c9705fSDavid Hildenbrand {
142815c9705fSDavid Hildenbrand 	struct kvm_s390_vm_cpu_feat data;
142915c9705fSDavid Hildenbrand 
143015c9705fSDavid Hildenbrand 	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
143115c9705fSDavid Hildenbrand 		return -EFAULT;
143215c9705fSDavid Hildenbrand 	if (!bitmap_subset((unsigned long *) data.feat,
143315c9705fSDavid Hildenbrand 			   kvm_s390_available_cpu_feat,
143415c9705fSDavid Hildenbrand 			   KVM_S390_VM_CPU_FEAT_NR_BITS))
143515c9705fSDavid Hildenbrand 		return -EINVAL;
143615c9705fSDavid Hildenbrand 
143715c9705fSDavid Hildenbrand 	mutex_lock(&kvm->lock);
14382f8311c9SChristian Borntraeger 	if (kvm->created_vcpus) {
14392f8311c9SChristian Borntraeger 		mutex_unlock(&kvm->lock);
14402f8311c9SChristian Borntraeger 		return -EBUSY;
14412f8311c9SChristian Borntraeger 	}
144215c9705fSDavid Hildenbrand 	bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
144315c9705fSDavid Hildenbrand 		    KVM_S390_VM_CPU_FEAT_NR_BITS);
144415c9705fSDavid Hildenbrand 	mutex_unlock(&kvm->lock);
14452f8311c9SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
14462f8311c9SChristian Borntraeger 			 data.feat[0],
14472f8311c9SChristian Borntraeger 			 data.feat[1],
14482f8311c9SChristian Borntraeger 			 data.feat[2]);
14492f8311c9SChristian Borntraeger 	return 0;
145015c9705fSDavid Hildenbrand }
145115c9705fSDavid Hildenbrand 
14520a763c78SDavid Hildenbrand static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
14530a763c78SDavid Hildenbrand 					  struct kvm_device_attr *attr)
14540a763c78SDavid Hildenbrand {
1455346fa2f8SChristian Borntraeger 	mutex_lock(&kvm->lock);
1456346fa2f8SChristian Borntraeger 	if (kvm->created_vcpus) {
1457346fa2f8SChristian Borntraeger 		mutex_unlock(&kvm->lock);
1458346fa2f8SChristian Borntraeger 		return -EBUSY;
1459346fa2f8SChristian Borntraeger 	}
1460346fa2f8SChristian Borntraeger 
1461346fa2f8SChristian Borntraeger 	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
1462346fa2f8SChristian Borntraeger 			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
1463346fa2f8SChristian Borntraeger 		mutex_unlock(&kvm->lock);
1464346fa2f8SChristian Borntraeger 		return -EFAULT;
1465346fa2f8SChristian Borntraeger 	}
1466346fa2f8SChristian Borntraeger 	mutex_unlock(&kvm->lock);
1467346fa2f8SChristian Borntraeger 
146811ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
146911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
147011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
147111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
147211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
147311ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
147411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
147511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
147611ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
147711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
147811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
147911ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
148011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
148111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
148211ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KM     subfunc 0x%16.16lx.%16.16lx",
148311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
148411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
148511ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
148611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
148711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
148811ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
148911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
149011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
149111ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
149211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
149311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
149411ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
149511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
149611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
149711ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
149811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
149911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
150011ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
150111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
150211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
150311ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
150411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
150511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
150611ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
150711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
150811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
150911ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
151011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
151111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
151213209ad0SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
151313209ad0SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
151413209ad0SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1515173aec2dSChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1516173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1517173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1518173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1519173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
15204f45b90eSChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
15214f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
15224f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
15234f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
15244f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
152511ba5961SChristian Borntraeger 
1526346fa2f8SChristian Borntraeger 	return 0;
15270a763c78SDavid Hildenbrand }
15280a763c78SDavid Hildenbrand 
1529658b6edaSMichael Mueller static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1530658b6edaSMichael Mueller {
1531658b6edaSMichael Mueller 	int ret = -ENXIO;
1532658b6edaSMichael Mueller 
1533658b6edaSMichael Mueller 	switch (attr->attr) {
1534658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
1535658b6edaSMichael Mueller 		ret = kvm_s390_set_processor(kvm, attr);
1536658b6edaSMichael Mueller 		break;
153715c9705fSDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
153815c9705fSDavid Hildenbrand 		ret = kvm_s390_set_processor_feat(kvm, attr);
153915c9705fSDavid Hildenbrand 		break;
15400a763c78SDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
15410a763c78SDavid Hildenbrand 		ret = kvm_s390_set_processor_subfunc(kvm, attr);
15420a763c78SDavid Hildenbrand 		break;
1543658b6edaSMichael Mueller 	}
1544658b6edaSMichael Mueller 	return ret;
1545658b6edaSMichael Mueller }
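
/*
 * Hedged usage sketch, not part of the original source: a typical CPU model
 * setup reads the machine properties first and writes a (possibly reduced)
 * processor model back, all before the first vCPU exists. "vm_fd" is an
 * assumed open VM fd; includes and error handling are omitted:
 *
 *	struct kvm_s390_vm_cpu_machine mach;
 *	struct kvm_s390_vm_cpu_processor proc = {};
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_CPU_MODEL,
 *		.attr  = KVM_S390_VM_CPU_MACHINE,
 *		.addr  = (__u64)(unsigned long)&mach,
 *	};
 *
 *	ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
 *
 *	proc.cpuid = mach.cpuid;
 *	proc.ibc   = mach.ibc & 0xfff;
 *	memcpy(proc.fac_list, mach.fac_list, sizeof(proc.fac_list));
 *
 *	attr.attr = KVM_S390_VM_CPU_PROCESSOR;
 *	attr.addr = (__u64)(unsigned long)&proc;
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * The 0xfff mask mirrors how kvm_s390_set_processor() derives the unblocked
 * ibc from sclp.ibc; passing an ibc of 0 would simply keep the default.
 */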
1546658b6edaSMichael Mueller 
1547658b6edaSMichael Mueller static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1548658b6edaSMichael Mueller {
1549658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_processor *proc;
1550658b6edaSMichael Mueller 	int ret = 0;
1551658b6edaSMichael Mueller 
1552c4196218SChristian Borntraeger 	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
1553658b6edaSMichael Mueller 	if (!proc) {
1554658b6edaSMichael Mueller 		ret = -ENOMEM;
1555658b6edaSMichael Mueller 		goto out;
1556658b6edaSMichael Mueller 	}
15579bb0ec09SDavid Hildenbrand 	proc->cpuid = kvm->arch.model.cpuid;
1558658b6edaSMichael Mueller 	proc->ibc = kvm->arch.model.ibc;
1559c54f0d6aSDavid Hildenbrand 	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1560c54f0d6aSDavid Hildenbrand 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
1561a8c39dd7SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1562a8c39dd7SChristian Borntraeger 		 kvm->arch.model.ibc,
1563a8c39dd7SChristian Borntraeger 		 kvm->arch.model.cpuid);
1564a8c39dd7SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1565a8c39dd7SChristian Borntraeger 		 kvm->arch.model.fac_list[0],
1566a8c39dd7SChristian Borntraeger 		 kvm->arch.model.fac_list[1],
1567a8c39dd7SChristian Borntraeger 		 kvm->arch.model.fac_list[2]);
1568658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1569658b6edaSMichael Mueller 		ret = -EFAULT;
1570658b6edaSMichael Mueller 	kfree(proc);
1571658b6edaSMichael Mueller out:
1572658b6edaSMichael Mueller 	return ret;
1573658b6edaSMichael Mueller }
1574658b6edaSMichael Mueller 
1575658b6edaSMichael Mueller static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1576658b6edaSMichael Mueller {
1577658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_machine *mach;
1578658b6edaSMichael Mueller 	int ret = 0;
1579658b6edaSMichael Mueller 
1580c4196218SChristian Borntraeger 	mach = kzalloc(sizeof(*mach), GFP_KERNEL_ACCOUNT);
1581658b6edaSMichael Mueller 	if (!mach) {
1582658b6edaSMichael Mueller 		ret = -ENOMEM;
1583658b6edaSMichael Mueller 		goto out;
1584658b6edaSMichael Mueller 	}
1585658b6edaSMichael Mueller 	get_cpu_id((struct cpuid *) &mach->cpuid);
158637c5f6c8SDavid Hildenbrand 	mach->ibc = sclp.ibc;
1587c54f0d6aSDavid Hildenbrand 	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
1588981467c9SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
1589658b6edaSMichael Mueller 	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
159004478197SChristian Borntraeger 	       sizeof(S390_lowcore.stfle_fac_list));
1591a8c39dd7SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host ibc:  0x%4.4x, host cpuid:  0x%16.16llx",
1592a8c39dd7SChristian Borntraeger 		 kvm->arch.model.ibc,
1593a8c39dd7SChristian Borntraeger 		 kvm->arch.model.cpuid);
1594a8c39dd7SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host facmask:  0x%16.16llx.%16.16llx.%16.16llx",
1595a8c39dd7SChristian Borntraeger 		 mach->fac_mask[0],
1596a8c39dd7SChristian Borntraeger 		 mach->fac_mask[1],
1597a8c39dd7SChristian Borntraeger 		 mach->fac_mask[2]);
1598a8c39dd7SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host faclist:  0x%16.16llx.%16.16llx.%16.16llx",
1599a8c39dd7SChristian Borntraeger 		 mach->fac_list[0],
1600a8c39dd7SChristian Borntraeger 		 mach->fac_list[1],
1601a8c39dd7SChristian Borntraeger 		 mach->fac_list[2]);
1602658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1603658b6edaSMichael Mueller 		ret = -EFAULT;
1604658b6edaSMichael Mueller 	kfree(mach);
1605658b6edaSMichael Mueller out:
1606658b6edaSMichael Mueller 	return ret;
1607658b6edaSMichael Mueller }
1608658b6edaSMichael Mueller 
160915c9705fSDavid Hildenbrand static int kvm_s390_get_processor_feat(struct kvm *kvm,
161015c9705fSDavid Hildenbrand 				       struct kvm_device_attr *attr)
161115c9705fSDavid Hildenbrand {
161215c9705fSDavid Hildenbrand 	struct kvm_s390_vm_cpu_feat data;
161315c9705fSDavid Hildenbrand 
161415c9705fSDavid Hildenbrand 	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
161515c9705fSDavid Hildenbrand 		    KVM_S390_VM_CPU_FEAT_NR_BITS);
161615c9705fSDavid Hildenbrand 	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
161715c9705fSDavid Hildenbrand 		return -EFAULT;
16182f8311c9SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
16192f8311c9SChristian Borntraeger 			 data.feat[0],
16202f8311c9SChristian Borntraeger 			 data.feat[1],
16212f8311c9SChristian Borntraeger 			 data.feat[2]);
162215c9705fSDavid Hildenbrand 	return 0;
162315c9705fSDavid Hildenbrand }
162415c9705fSDavid Hildenbrand 
162515c9705fSDavid Hildenbrand static int kvm_s390_get_machine_feat(struct kvm *kvm,
162615c9705fSDavid Hildenbrand 				     struct kvm_device_attr *attr)
162715c9705fSDavid Hildenbrand {
162815c9705fSDavid Hildenbrand 	struct kvm_s390_vm_cpu_feat data;
162915c9705fSDavid Hildenbrand 
163015c9705fSDavid Hildenbrand 	bitmap_copy((unsigned long *) data.feat,
163115c9705fSDavid Hildenbrand 		    kvm_s390_available_cpu_feat,
163215c9705fSDavid Hildenbrand 		    KVM_S390_VM_CPU_FEAT_NR_BITS);
163315c9705fSDavid Hildenbrand 	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
163415c9705fSDavid Hildenbrand 		return -EFAULT;
16352f8311c9SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host feat:  0x%16.16llx.0x%16.16llx.0x%16.16llx",
16362f8311c9SChristian Borntraeger 			 data.feat[0],
16372f8311c9SChristian Borntraeger 			 data.feat[1],
16382f8311c9SChristian Borntraeger 			 data.feat[2]);
163915c9705fSDavid Hildenbrand 	return 0;
164015c9705fSDavid Hildenbrand }
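
/*
 * Illustrative sketch (assumption, not from this file): the feature bitmaps
 * above travel as struct kvm_s390_vm_cpu_feat. A simple policy is to query
 * the machine (host) features and hand the same bitmap back as the processor
 * (guest) features; real userspace would normally clear any bit it does not
 * want to expose first. "vm_fd" is an assumed open VM fd:
 *
 *	struct kvm_s390_vm_cpu_feat feat;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_CPU_MODEL,
 *		.attr  = KVM_S390_VM_CPU_MACHINE_FEAT,
 *		.addr  = (__u64)(unsigned long)&feat,
 *	};
 *
 *	ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
 *
 *	attr.attr = KVM_S390_VM_CPU_PROCESSOR_FEAT;
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * kvm_s390_set_processor_feat() rejects any bitmap that is not a subset of
 * kvm_s390_available_cpu_feat and fails with -EBUSY once vCPUs exist.
 */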
164115c9705fSDavid Hildenbrand 
16420a763c78SDavid Hildenbrand static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
16430a763c78SDavid Hildenbrand 					  struct kvm_device_attr *attr)
16440a763c78SDavid Hildenbrand {
1645346fa2f8SChristian Borntraeger 	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
1646346fa2f8SChristian Borntraeger 	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
1647346fa2f8SChristian Borntraeger 		return -EFAULT;
1648346fa2f8SChristian Borntraeger 
164911ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
165011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
165111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
165211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
165311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
165411ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
165511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
165611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
165711ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
165811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
165911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
166011ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
166111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
166211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
166311ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KM     subfunc 0x%16.16lx.%16.16lx",
166411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
166511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
166611ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
166711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
166811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
166911ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
167011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
167111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
167211ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
167311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
167411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
167511ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
167611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
167711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
167811ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
167911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
168011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
168111ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
168211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
168311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
168411ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
168511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
168611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
168711ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
168811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
168911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
169011ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
169111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
169211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
169313209ad0SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
169413209ad0SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
169513209ad0SChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1696173aec2dSChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1697173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1698173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1699173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1700173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
17014f45b90eSChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
17024f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
17034f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
17044f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
17054f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
170611ba5961SChristian Borntraeger 
1707346fa2f8SChristian Borntraeger 	return 0;
17080a763c78SDavid Hildenbrand }
17090a763c78SDavid Hildenbrand 
17100a763c78SDavid Hildenbrand static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
17110a763c78SDavid Hildenbrand 					struct kvm_device_attr *attr)
17120a763c78SDavid Hildenbrand {
17130a763c78SDavid Hildenbrand 	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
17140a763c78SDavid Hildenbrand 	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
17150a763c78SDavid Hildenbrand 		return -EFAULT;
171611ba5961SChristian Borntraeger 
171711ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
171811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
171911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
172011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
172111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
172211ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  PTFF   subfunc 0x%16.16lx.%16.16lx",
172311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
172411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
172511ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KMAC   subfunc 0x%16.16lx.%16.16lx",
172611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
172711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
172811ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KMC    subfunc 0x%16.16lx.%16.16lx",
172911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
173011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
173111ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KM     subfunc 0x%16.16lx.%16.16lx",
173211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
173311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
173411ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KIMD   subfunc 0x%16.16lx.%16.16lx",
173511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
173611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
173711ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KLMD   subfunc 0x%16.16lx.%16.16lx",
173811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
173911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
174011ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  PCKMO  subfunc 0x%16.16lx.%16.16lx",
174111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
174211ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
174311ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KMCTR  subfunc 0x%16.16lx.%16.16lx",
174411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
174511ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
174611ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KMF    subfunc 0x%16.16lx.%16.16lx",
174711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
174811ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
174911ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KMO    subfunc 0x%16.16lx.%16.16lx",
175011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
175111ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
175211ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  PCC    subfunc 0x%16.16lx.%16.16lx",
175311ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
175411ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
175511ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  PPNO   subfunc 0x%16.16lx.%16.16lx",
175611ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
175711ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
175811ba5961SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KMA    subfunc 0x%16.16lx.%16.16lx",
175911ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
176011ba5961SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
176113209ad0SChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  KDSA   subfunc 0x%16.16lx.%16.16lx",
176213209ad0SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
176313209ad0SChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
1764173aec2dSChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1765173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
1766173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
1767173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
1768173aec2dSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
17694f45b90eSChristian Borntraeger 	VM_EVENT(kvm, 3, "GET: host  DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
17704f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
17714f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
17724f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
17734f45b90eSChristian Borntraeger 		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
177411ba5961SChristian Borntraeger 
17750a763c78SDavid Hildenbrand 	return 0;
17760a763c78SDavid Hildenbrand }
1777346fa2f8SChristian Borntraeger 
1778658b6edaSMichael Mueller static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1779658b6edaSMichael Mueller {
1780658b6edaSMichael Mueller 	int ret = -ENXIO;
1781658b6edaSMichael Mueller 
1782658b6edaSMichael Mueller 	switch (attr->attr) {
1783658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
1784658b6edaSMichael Mueller 		ret = kvm_s390_get_processor(kvm, attr);
1785658b6edaSMichael Mueller 		break;
1786658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MACHINE:
1787658b6edaSMichael Mueller 		ret = kvm_s390_get_machine(kvm, attr);
1788658b6edaSMichael Mueller 		break;
178915c9705fSDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
179015c9705fSDavid Hildenbrand 		ret = kvm_s390_get_processor_feat(kvm, attr);
179115c9705fSDavid Hildenbrand 		break;
179215c9705fSDavid Hildenbrand 	case KVM_S390_VM_CPU_MACHINE_FEAT:
179315c9705fSDavid Hildenbrand 		ret = kvm_s390_get_machine_feat(kvm, attr);
179415c9705fSDavid Hildenbrand 		break;
17950a763c78SDavid Hildenbrand 	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
17960a763c78SDavid Hildenbrand 		ret = kvm_s390_get_processor_subfunc(kvm, attr);
17970a763c78SDavid Hildenbrand 		break;
17980a763c78SDavid Hildenbrand 	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
17990a763c78SDavid Hildenbrand 		ret = kvm_s390_get_machine_subfunc(kvm, attr);
18000a763c78SDavid Hildenbrand 		break;
1801658b6edaSMichael Mueller 	}
1802658b6edaSMichael Mueller 	return ret;
1803658b6edaSMichael Mueller }
1804658b6edaSMichael Mueller 
1805f2061656SDominik Dingel static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1806f2061656SDominik Dingel {
1807f2061656SDominik Dingel 	int ret;
1808f2061656SDominik Dingel 
1809f2061656SDominik Dingel 	switch (attr->group) {
18104f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
18118c0a7ce6SDominik Dingel 		ret = kvm_s390_set_mem_control(kvm, attr);
18124f718eabSDominik Dingel 		break;
181372f25020SJason J. Herne 	case KVM_S390_VM_TOD:
181472f25020SJason J. Herne 		ret = kvm_s390_set_tod(kvm, attr);
181572f25020SJason J. Herne 		break;
1816658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
1817658b6edaSMichael Mueller 		ret = kvm_s390_set_cpu_model(kvm, attr);
1818658b6edaSMichael Mueller 		break;
1819a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO:
1820a374e892STony Krowiak 		ret = kvm_s390_vm_set_crypto(kvm, attr);
1821a374e892STony Krowiak 		break;
1822190df4a2SClaudio Imbrenda 	case KVM_S390_VM_MIGRATION:
1823190df4a2SClaudio Imbrenda 		ret = kvm_s390_vm_set_migration(kvm, attr);
1824190df4a2SClaudio Imbrenda 		break;
1825f2061656SDominik Dingel 	default:
1826f2061656SDominik Dingel 		ret = -ENXIO;
1827f2061656SDominik Dingel 		break;
1828f2061656SDominik Dingel 	}
1829f2061656SDominik Dingel 
1830f2061656SDominik Dingel 	return ret;
1831f2061656SDominik Dingel }
1832f2061656SDominik Dingel 
1833f2061656SDominik Dingel static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1834f2061656SDominik Dingel {
18358c0a7ce6SDominik Dingel 	int ret;
18368c0a7ce6SDominik Dingel 
18378c0a7ce6SDominik Dingel 	switch (attr->group) {
18388c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
18398c0a7ce6SDominik Dingel 		ret = kvm_s390_get_mem_control(kvm, attr);
18408c0a7ce6SDominik Dingel 		break;
184172f25020SJason J. Herne 	case KVM_S390_VM_TOD:
184272f25020SJason J. Herne 		ret = kvm_s390_get_tod(kvm, attr);
184372f25020SJason J. Herne 		break;
1844658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
1845658b6edaSMichael Mueller 		ret = kvm_s390_get_cpu_model(kvm, attr);
1846658b6edaSMichael Mueller 		break;
1847190df4a2SClaudio Imbrenda 	case KVM_S390_VM_MIGRATION:
1848190df4a2SClaudio Imbrenda 		ret = kvm_s390_vm_get_migration(kvm, attr);
1849190df4a2SClaudio Imbrenda 		break;
18508c0a7ce6SDominik Dingel 	default:
18518c0a7ce6SDominik Dingel 		ret = -ENXIO;
18528c0a7ce6SDominik Dingel 		break;
18538c0a7ce6SDominik Dingel 	}
18548c0a7ce6SDominik Dingel 
18558c0a7ce6SDominik Dingel 	return ret;
1856f2061656SDominik Dingel }
1857f2061656SDominik Dingel 
1858f2061656SDominik Dingel static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1859f2061656SDominik Dingel {
1860f2061656SDominik Dingel 	int ret;
1861f2061656SDominik Dingel 
1862f2061656SDominik Dingel 	switch (attr->group) {
18634f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
18644f718eabSDominik Dingel 		switch (attr->attr) {
18654f718eabSDominik Dingel 		case KVM_S390_VM_MEM_ENABLE_CMMA:
18664f718eabSDominik Dingel 		case KVM_S390_VM_MEM_CLR_CMMA:
1867f9cbd9b0SDavid Hildenbrand 			ret = sclp.has_cmma ? 0 : -ENXIO;
1868f9cbd9b0SDavid Hildenbrand 			break;
18698c0a7ce6SDominik Dingel 		case KVM_S390_VM_MEM_LIMIT_SIZE:
18704f718eabSDominik Dingel 			ret = 0;
18714f718eabSDominik Dingel 			break;
18724f718eabSDominik Dingel 		default:
18734f718eabSDominik Dingel 			ret = -ENXIO;
18744f718eabSDominik Dingel 			break;
18754f718eabSDominik Dingel 		}
18764f718eabSDominik Dingel 		break;
187772f25020SJason J. Herne 	case KVM_S390_VM_TOD:
187872f25020SJason J. Herne 		switch (attr->attr) {
187972f25020SJason J. Herne 		case KVM_S390_VM_TOD_LOW:
188072f25020SJason J. Herne 		case KVM_S390_VM_TOD_HIGH:
188172f25020SJason J. Herne 			ret = 0;
188272f25020SJason J. Herne 			break;
188372f25020SJason J. Herne 		default:
188472f25020SJason J. Herne 			ret = -ENXIO;
188572f25020SJason J. Herne 			break;
188672f25020SJason J. Herne 		}
188772f25020SJason J. Herne 		break;
1888658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
1889658b6edaSMichael Mueller 		switch (attr->attr) {
1890658b6edaSMichael Mueller 		case KVM_S390_VM_CPU_PROCESSOR:
1891658b6edaSMichael Mueller 		case KVM_S390_VM_CPU_MACHINE:
189215c9705fSDavid Hildenbrand 		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
189315c9705fSDavid Hildenbrand 		case KVM_S390_VM_CPU_MACHINE_FEAT:
18940a763c78SDavid Hildenbrand 		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1895346fa2f8SChristian Borntraeger 		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1896658b6edaSMichael Mueller 			ret = 0;
1897658b6edaSMichael Mueller 			break;
1898658b6edaSMichael Mueller 		default:
1899658b6edaSMichael Mueller 			ret = -ENXIO;
1900658b6edaSMichael Mueller 			break;
1901658b6edaSMichael Mueller 		}
1902658b6edaSMichael Mueller 		break;
1903a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO:
1904a374e892STony Krowiak 		switch (attr->attr) {
1905a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1906a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1907a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1908a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1909a374e892STony Krowiak 			ret = 0;
1910a374e892STony Krowiak 			break;
191137940fb0STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
191237940fb0STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
191337940fb0STony Krowiak 			ret = ap_instructions_available() ? 0 : -ENXIO;
191437940fb0STony Krowiak 			break;
1915a374e892STony Krowiak 		default:
1916a374e892STony Krowiak 			ret = -ENXIO;
1917a374e892STony Krowiak 			break;
1918a374e892STony Krowiak 		}
1919a374e892STony Krowiak 		break;
1920190df4a2SClaudio Imbrenda 	case KVM_S390_VM_MIGRATION:
1921190df4a2SClaudio Imbrenda 		ret = 0;
1922190df4a2SClaudio Imbrenda 		break;
1923f2061656SDominik Dingel 	default:
1924f2061656SDominik Dingel 		ret = -ENXIO;
1925f2061656SDominik Dingel 		break;
1926f2061656SDominik Dingel 	}
1927f2061656SDominik Dingel 
1928f2061656SDominik Dingel 	return ret;
1929f2061656SDominik Dingel }
1930f2061656SDominik Dingel 
193130ee2a98SJason J. Herne static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
193230ee2a98SJason J. Herne {
193330ee2a98SJason J. Herne 	uint8_t *keys;
193430ee2a98SJason J. Herne 	uint64_t hva;
19354f899147SChristian Borntraeger 	int srcu_idx, i, r = 0;
193630ee2a98SJason J. Herne 
193730ee2a98SJason J. Herne 	if (args->flags != 0)
193830ee2a98SJason J. Herne 		return -EINVAL;
193930ee2a98SJason J. Herne 
194030ee2a98SJason J. Herne 	/* Is this guest using storage keys? */
194155531b74SJanosch Frank 	if (!mm_uses_skeys(current->mm))
194230ee2a98SJason J. Herne 		return KVM_S390_GET_SKEYS_NONE;
194330ee2a98SJason J. Herne 
194430ee2a98SJason J. Herne 	/* Enforce sane limit on memory allocation */
194530ee2a98SJason J. Herne 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
194630ee2a98SJason J. Herne 		return -EINVAL;
194730ee2a98SJason J. Herne 
1948c4196218SChristian Borntraeger 	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
194930ee2a98SJason J. Herne 	if (!keys)
195030ee2a98SJason J. Herne 		return -ENOMEM;
195130ee2a98SJason J. Herne 
1952d8ed45c5SMichel Lespinasse 	mmap_read_lock(current->mm);
19534f899147SChristian Borntraeger 	srcu_idx = srcu_read_lock(&kvm->srcu);
195430ee2a98SJason J. Herne 	for (i = 0; i < args->count; i++) {
195530ee2a98SJason J. Herne 		hva = gfn_to_hva(kvm, args->start_gfn + i);
195630ee2a98SJason J. Herne 		if (kvm_is_error_hva(hva)) {
195730ee2a98SJason J. Herne 			r = -EFAULT;
1958d3ed1ceeSMartin Schwidefsky 			break;
195930ee2a98SJason J. Herne 		}
196030ee2a98SJason J. Herne 
1961154c8c19SDavid Hildenbrand 		r = get_guest_storage_key(current->mm, hva, &keys[i]);
1962154c8c19SDavid Hildenbrand 		if (r)
1963d3ed1ceeSMartin Schwidefsky 			break;
196430ee2a98SJason J. Herne 	}
19654f899147SChristian Borntraeger 	srcu_read_unlock(&kvm->srcu, srcu_idx);
1966d8ed45c5SMichel Lespinasse 	mmap_read_unlock(current->mm);
196730ee2a98SJason J. Herne 
1968d3ed1ceeSMartin Schwidefsky 	if (!r) {
196930ee2a98SJason J. Herne 		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
197030ee2a98SJason J. Herne 				 sizeof(uint8_t) * args->count);
197130ee2a98SJason J. Herne 		if (r)
197230ee2a98SJason J. Herne 			r = -EFAULT;
1973d3ed1ceeSMartin Schwidefsky 	}
1974d3ed1ceeSMartin Schwidefsky 
197530ee2a98SJason J. Herne 	kvfree(keys);
197630ee2a98SJason J. Herne 	return r;
197730ee2a98SJason J. Herne }
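
/*
 * Illustrative sketch (not part of this file): how a userspace VMM could
 * drive the handler above through the KVM_S390_GET_SKEYS ioctl on the VM
 * file descriptor; the write direction uses KVM_S390_SET_SKEYS with the
 * same structure. vm_fd and NR_PAGES are assumptions of the example.
 *
 *	uint8_t skeys[NR_PAGES];
 *	struct kvm_s390_skeys args = {
 *		.start_gfn     = 0,
 *		.count         = NR_PAGES,
 *		.skeydata_addr = (__u64)(uintptr_t)skeys,
 *	};
 *	long ret = ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
 *	if (ret == KVM_S390_GET_SKEYS_NONE)
 *		;	// guest is not using storage keys, nothing was copied
 *	else if (ret)
 *		;	// error, errno set to EFAULT, EINVAL, ENOMEM, ...
 */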
197830ee2a98SJason J. Herne 
197930ee2a98SJason J. Herne static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
198030ee2a98SJason J. Herne {
198130ee2a98SJason J. Herne 	uint8_t *keys;
198230ee2a98SJason J. Herne 	uint64_t hva;
19834f899147SChristian Borntraeger 	int srcu_idx, i, r = 0;
1984bd096f64SJanosch Frank 	bool unlocked;
198530ee2a98SJason J. Herne 
198630ee2a98SJason J. Herne 	if (args->flags != 0)
198730ee2a98SJason J. Herne 		return -EINVAL;
198830ee2a98SJason J. Herne 
198930ee2a98SJason J. Herne 	/* Enforce sane limit on memory allocation */
199030ee2a98SJason J. Herne 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
199130ee2a98SJason J. Herne 		return -EINVAL;
199230ee2a98SJason J. Herne 
1993c4196218SChristian Borntraeger 	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
199430ee2a98SJason J. Herne 	if (!keys)
199530ee2a98SJason J. Herne 		return -ENOMEM;
199630ee2a98SJason J. Herne 
199730ee2a98SJason J. Herne 	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
199830ee2a98SJason J. Herne 			   sizeof(uint8_t) * args->count);
199930ee2a98SJason J. Herne 	if (r) {
200030ee2a98SJason J. Herne 		r = -EFAULT;
200130ee2a98SJason J. Herne 		goto out;
200230ee2a98SJason J. Herne 	}
200330ee2a98SJason J. Herne 
200430ee2a98SJason J. Herne 	/* Enable storage key handling for the guest */
200514d4a425SDominik Dingel 	r = s390_enable_skey();
200614d4a425SDominik Dingel 	if (r)
200714d4a425SDominik Dingel 		goto out;
200830ee2a98SJason J. Herne 
2009bd096f64SJanosch Frank 	i = 0;
2010d8ed45c5SMichel Lespinasse 	mmap_read_lock(current->mm);
20114f899147SChristian Borntraeger 	srcu_idx = srcu_read_lock(&kvm->srcu);
2012bd096f64SJanosch Frank 	while (i < args->count) {
2013bd096f64SJanosch Frank 		unlocked = false;
201430ee2a98SJason J. Herne 		hva = gfn_to_hva(kvm, args->start_gfn + i);
201530ee2a98SJason J. Herne 		if (kvm_is_error_hva(hva)) {
201630ee2a98SJason J. Herne 			r = -EFAULT;
2017d3ed1ceeSMartin Schwidefsky 			break;
201830ee2a98SJason J. Herne 		}
201930ee2a98SJason J. Herne 
202030ee2a98SJason J. Herne 		/* Lowest order bit is reserved */
202130ee2a98SJason J. Herne 		if (keys[i] & 0x01) {
202230ee2a98SJason J. Herne 			r = -EINVAL;
2023d3ed1ceeSMartin Schwidefsky 			break;
202430ee2a98SJason J. Herne 		}
202530ee2a98SJason J. Herne 
2026fe69eabfSDavid Hildenbrand 		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
2027bd096f64SJanosch Frank 		if (r) {
202864019a2eSPeter Xu 			r = fixup_user_fault(current->mm, hva,
2029bd096f64SJanosch Frank 					     FAULT_FLAG_WRITE, &unlocked);
203030ee2a98SJason J. Herne 			if (r)
2031d3ed1ceeSMartin Schwidefsky 				break;
203230ee2a98SJason J. Herne 		}
2033bd096f64SJanosch Frank 		if (!r)
2034bd096f64SJanosch Frank 			i++;
2035bd096f64SJanosch Frank 	}
20364f899147SChristian Borntraeger 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2037d8ed45c5SMichel Lespinasse 	mmap_read_unlock(current->mm);
203830ee2a98SJason J. Herne out:
203930ee2a98SJason J. Herne 	kvfree(keys);
204030ee2a98SJason J. Herne 	return r;
204130ee2a98SJason J. Herne }
204230ee2a98SJason J. Herne 
20434036e387SClaudio Imbrenda /*
20444036e387SClaudio Imbrenda  * Base address and length must be sent at the start of each block, therefore
20454036e387SClaudio Imbrenda  * it's cheaper to send some clean data, as long as it's less than the size of
20464036e387SClaudio Imbrenda  * two longs.
20474036e387SClaudio Imbrenda  */
20484036e387SClaudio Imbrenda #define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
20494036e387SClaudio Imbrenda /* for consistency */
20504036e387SClaudio Imbrenda #define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
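/*
 * Worked example for KVM_S390_MAX_BIT_DISTANCE: on s390x, 2 * sizeof(void *)
 * is 16, so a run of up to 16 clean pages (one attribute byte each) costs no
 * more than the two 8-byte values (base address and length) that starting a
 * new block would require.
 */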
20514036e387SClaudio Imbrenda 
20524036e387SClaudio Imbrenda /*
2053afdad616SClaudio Imbrenda  * Similar to gfn_to_memslot, but it also returns the index of a memslot when
2054afdad616SClaudio Imbrenda  * the address falls in a hole. In that case the index of one of the memslots
2055afdad616SClaudio Imbrenda  * bordering the hole is returned.
2056afdad616SClaudio Imbrenda  */
2057afdad616SClaudio Imbrenda static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
2058afdad616SClaudio Imbrenda {
2059afdad616SClaudio Imbrenda 	int start = 0, end = slots->used_slots;
2060afdad616SClaudio Imbrenda 	int slot = atomic_read(&slots->lru_slot);
2061afdad616SClaudio Imbrenda 	struct kvm_memory_slot *memslots = slots->memslots;
2062afdad616SClaudio Imbrenda 
2063afdad616SClaudio Imbrenda 	if (gfn >= memslots[slot].base_gfn &&
2064afdad616SClaudio Imbrenda 	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
2065afdad616SClaudio Imbrenda 		return slot;
2066afdad616SClaudio Imbrenda 
2067afdad616SClaudio Imbrenda 	while (start < end) {
2068afdad616SClaudio Imbrenda 		slot = start + (end - start) / 2;
2069afdad616SClaudio Imbrenda 
2070afdad616SClaudio Imbrenda 		if (gfn >= memslots[slot].base_gfn)
2071afdad616SClaudio Imbrenda 			end = slot;
2072afdad616SClaudio Imbrenda 		else
2073afdad616SClaudio Imbrenda 			start = slot + 1;
2074afdad616SClaudio Imbrenda 	}
2075afdad616SClaudio Imbrenda 
207697daa028SSean Christopherson 	if (start >= slots->used_slots)
207797daa028SSean Christopherson 		return slots->used_slots - 1;
207897daa028SSean Christopherson 
2079afdad616SClaudio Imbrenda 	if (gfn >= memslots[start].base_gfn &&
2080afdad616SClaudio Imbrenda 	    gfn < memslots[start].base_gfn + memslots[start].npages) {
2081afdad616SClaudio Imbrenda 		atomic_set(&slots->lru_slot, start);
2082afdad616SClaudio Imbrenda 	}
2083afdad616SClaudio Imbrenda 
2084afdad616SClaudio Imbrenda 	return start;
2085afdad616SClaudio Imbrenda }
2086afdad616SClaudio Imbrenda 
2087afdad616SClaudio Imbrenda static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2088afdad616SClaudio Imbrenda 			      u8 *res, unsigned long bufsize)
2089afdad616SClaudio Imbrenda {
2090afdad616SClaudio Imbrenda 	unsigned long pgstev, hva, cur_gfn = args->start_gfn;
2091afdad616SClaudio Imbrenda 
2092afdad616SClaudio Imbrenda 	args->count = 0;
2093afdad616SClaudio Imbrenda 	while (args->count < bufsize) {
2094afdad616SClaudio Imbrenda 		hva = gfn_to_hva(kvm, cur_gfn);
2095afdad616SClaudio Imbrenda 		/*
2096afdad616SClaudio Imbrenda 		 * We return an error if the first value was invalid, but we
2097afdad616SClaudio Imbrenda 		 * return successfully if at least one value was copied.
2098afdad616SClaudio Imbrenda 		 */
2099afdad616SClaudio Imbrenda 		if (kvm_is_error_hva(hva))
2100afdad616SClaudio Imbrenda 			return args->count ? 0 : -EFAULT;
2101afdad616SClaudio Imbrenda 		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2102afdad616SClaudio Imbrenda 			pgstev = 0;
2103afdad616SClaudio Imbrenda 		res[args->count++] = (pgstev >> 24) & 0x43;
2104afdad616SClaudio Imbrenda 		cur_gfn++;
2105afdad616SClaudio Imbrenda 	}
2106afdad616SClaudio Imbrenda 
2107afdad616SClaudio Imbrenda 	return 0;
2108afdad616SClaudio Imbrenda }
2109afdad616SClaudio Imbrenda 
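/*
 * Find the guest frame number of the next page, at or after cur_gfn, whose
 * CMMA-dirty bit is set in the per-memslot bitmap. The search continues into
 * other memslots when the current one is exhausted; if nothing is found, a
 * value at or beyond the end of guest memory is returned.
 */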
2110afdad616SClaudio Imbrenda static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
2111afdad616SClaudio Imbrenda 					      unsigned long cur_gfn)
2112afdad616SClaudio Imbrenda {
2113afdad616SClaudio Imbrenda 	int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
2114afdad616SClaudio Imbrenda 	struct kvm_memory_slot *ms = slots->memslots + slotidx;
2115afdad616SClaudio Imbrenda 	unsigned long ofs = cur_gfn - ms->base_gfn;
2116afdad616SClaudio Imbrenda 
2117afdad616SClaudio Imbrenda 	if (ms->base_gfn + ms->npages <= cur_gfn) {
2118afdad616SClaudio Imbrenda 		slotidx--;
2119afdad616SClaudio Imbrenda 		/* If we are above the highest slot, wrap around */
2120afdad616SClaudio Imbrenda 		if (slotidx < 0)
2121afdad616SClaudio Imbrenda 			slotidx = slots->used_slots - 1;
2122afdad616SClaudio Imbrenda 
2123afdad616SClaudio Imbrenda 		ms = slots->memslots + slotidx;
2124afdad616SClaudio Imbrenda 		ofs = 0;
2125afdad616SClaudio Imbrenda 	}
2126afdad616SClaudio Imbrenda 	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
2127afdad616SClaudio Imbrenda 	while ((slotidx > 0) && (ofs >= ms->npages)) {
2128afdad616SClaudio Imbrenda 		slotidx--;
2129afdad616SClaudio Imbrenda 		ms = slots->memslots + slotidx;
2130afdad616SClaudio Imbrenda 		ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
2131afdad616SClaudio Imbrenda 	}
2132afdad616SClaudio Imbrenda 	return ms->base_gfn + ofs;
2133afdad616SClaudio Imbrenda }
2134afdad616SClaudio Imbrenda 
2135afdad616SClaudio Imbrenda static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2136afdad616SClaudio Imbrenda 			     u8 *res, unsigned long bufsize)
2137afdad616SClaudio Imbrenda {
2138afdad616SClaudio Imbrenda 	unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
2139afdad616SClaudio Imbrenda 	struct kvm_memslots *slots = kvm_memslots(kvm);
2140afdad616SClaudio Imbrenda 	struct kvm_memory_slot *ms;
2141afdad616SClaudio Imbrenda 
21420774a964SSean Christopherson 	if (unlikely(!slots->used_slots))
21430774a964SSean Christopherson 		return 0;
21440774a964SSean Christopherson 
2145afdad616SClaudio Imbrenda 	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
2146afdad616SClaudio Imbrenda 	ms = gfn_to_memslot(kvm, cur_gfn);
2147afdad616SClaudio Imbrenda 	args->count = 0;
2148afdad616SClaudio Imbrenda 	args->start_gfn = cur_gfn;
2149afdad616SClaudio Imbrenda 	if (!ms)
2150afdad616SClaudio Imbrenda 		return 0;
2151afdad616SClaudio Imbrenda 	next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
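	/*
	 * The memslot array is kept sorted by base_gfn, highest first, so
	 * slot 0 marks the upper end of guest memory.
	 */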
2152afdad616SClaudio Imbrenda 	mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;
2153afdad616SClaudio Imbrenda 
2154afdad616SClaudio Imbrenda 	while (args->count < bufsize) {
2155afdad616SClaudio Imbrenda 		hva = gfn_to_hva(kvm, cur_gfn);
2156afdad616SClaudio Imbrenda 		if (kvm_is_error_hva(hva))
2157afdad616SClaudio Imbrenda 			return 0;
2158afdad616SClaudio Imbrenda 		/* Decrement only if we actually flipped the bit to 0 */
2159afdad616SClaudio Imbrenda 		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
2160afdad616SClaudio Imbrenda 			atomic64_dec(&kvm->arch.cmma_dirty_pages);
2161afdad616SClaudio Imbrenda 		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2162afdad616SClaudio Imbrenda 			pgstev = 0;
2163afdad616SClaudio Imbrenda 		/* Save the value */
2164afdad616SClaudio Imbrenda 		res[args->count++] = (pgstev >> 24) & 0x43;
2165afdad616SClaudio Imbrenda 		/* If the next bit is too far away, stop. */
2166afdad616SClaudio Imbrenda 		if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
2167afdad616SClaudio Imbrenda 			return 0;
2168afdad616SClaudio Imbrenda 		/* If we reached the previous "next", find the next one */
2169afdad616SClaudio Imbrenda 		if (cur_gfn == next_gfn)
2170afdad616SClaudio Imbrenda 			next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2171afdad616SClaudio Imbrenda 		/* Reached the end of memory or of the buffer, stop */
2172afdad616SClaudio Imbrenda 		if ((next_gfn >= mem_end) ||
2173afdad616SClaudio Imbrenda 		    (next_gfn - args->start_gfn >= bufsize))
2174afdad616SClaudio Imbrenda 			return 0;
2175afdad616SClaudio Imbrenda 		cur_gfn++;
2176afdad616SClaudio Imbrenda 		/* Reached the end of the current memslot, take the next one. */
2177afdad616SClaudio Imbrenda 		if (cur_gfn - ms->base_gfn >= ms->npages) {
2178afdad616SClaudio Imbrenda 			ms = gfn_to_memslot(kvm, cur_gfn);
2179afdad616SClaudio Imbrenda 			if (!ms)
2180afdad616SClaudio Imbrenda 				return 0;
2181afdad616SClaudio Imbrenda 		}
2182afdad616SClaudio Imbrenda 	}
2183afdad616SClaudio Imbrenda 	return 0;
2184afdad616SClaudio Imbrenda }
2185afdad616SClaudio Imbrenda 
2186afdad616SClaudio Imbrenda /*
21874036e387SClaudio Imbrenda  * This function searches for the next page with dirty CMMA attributes, and
21884036e387SClaudio Imbrenda  * saves the attributes in the buffer up to either the end of the buffer or
21894036e387SClaudio Imbrenda  * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
21904036e387SClaudio Imbrenda  * no trailing clean bytes are saved.
21914036e387SClaudio Imbrenda  * In case no dirty bits were found, or if CMMA was not enabled or used, the
21924036e387SClaudio Imbrenda  * output buffer will indicate 0 as length.
21934036e387SClaudio Imbrenda  */
21944036e387SClaudio Imbrenda static int kvm_s390_get_cmma_bits(struct kvm *kvm,
21954036e387SClaudio Imbrenda 				  struct kvm_s390_cmma_log *args)
21964036e387SClaudio Imbrenda {
2197afdad616SClaudio Imbrenda 	unsigned long bufsize;
2198afdad616SClaudio Imbrenda 	int srcu_idx, peek, ret;
2199afdad616SClaudio Imbrenda 	u8 *values;
22004036e387SClaudio Imbrenda 
2201afdad616SClaudio Imbrenda 	if (!kvm->arch.use_cmma)
22024036e387SClaudio Imbrenda 		return -ENXIO;
22034036e387SClaudio Imbrenda 	/* Invalid/unsupported flags were specified */
22044036e387SClaudio Imbrenda 	if (args->flags & ~KVM_S390_CMMA_PEEK)
22054036e387SClaudio Imbrenda 		return -EINVAL;
22064036e387SClaudio Imbrenda 	/* Migration mode query, and we are not doing a migration */
22074036e387SClaudio Imbrenda 	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
2208afdad616SClaudio Imbrenda 	if (!peek && !kvm->arch.migration_mode)
22094036e387SClaudio Imbrenda 		return -EINVAL;
22104036e387SClaudio Imbrenda 	/* CMMA is disabled or was not used, or the buffer has length zero */
22114036e387SClaudio Imbrenda 	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
2212c9f0a2b8SJanosch Frank 	if (!bufsize || !kvm->mm->context.uses_cmm) {
22134036e387SClaudio Imbrenda 		memset(args, 0, sizeof(*args));
22144036e387SClaudio Imbrenda 		return 0;
22154036e387SClaudio Imbrenda 	}
22164036e387SClaudio Imbrenda 	/* We are not peeking, and there are no dirty pages */
2217afdad616SClaudio Imbrenda 	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
22184036e387SClaudio Imbrenda 		memset(args, 0, sizeof(*args));
22194036e387SClaudio Imbrenda 		return 0;
22204036e387SClaudio Imbrenda 	}
22214036e387SClaudio Imbrenda 
2222afdad616SClaudio Imbrenda 	values = vmalloc(bufsize);
2223afdad616SClaudio Imbrenda 	if (!values)
22244036e387SClaudio Imbrenda 		return -ENOMEM;
22254036e387SClaudio Imbrenda 
2226d8ed45c5SMichel Lespinasse 	mmap_read_lock(kvm->mm);
22274036e387SClaudio Imbrenda 	srcu_idx = srcu_read_lock(&kvm->srcu);
2228afdad616SClaudio Imbrenda 	if (peek)
2229afdad616SClaudio Imbrenda 		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
2230afdad616SClaudio Imbrenda 	else
2231afdad616SClaudio Imbrenda 		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
22324036e387SClaudio Imbrenda 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2233d8ed45c5SMichel Lespinasse 	mmap_read_unlock(kvm->mm);
22344036e387SClaudio Imbrenda 
2235afdad616SClaudio Imbrenda 	if (kvm->arch.migration_mode)
2236afdad616SClaudio Imbrenda 		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2237afdad616SClaudio Imbrenda 	else
2238afdad616SClaudio Imbrenda 		args->remaining = 0;
22394036e387SClaudio Imbrenda 
2240afdad616SClaudio Imbrenda 	if (copy_to_user((void __user *)args->values, values, args->count))
2241afdad616SClaudio Imbrenda 		ret = -EFAULT;
2242afdad616SClaudio Imbrenda 
2243afdad616SClaudio Imbrenda 	vfree(values);
2244afdad616SClaudio Imbrenda 	return ret;
22454036e387SClaudio Imbrenda }
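
/*
 * Illustrative sketch (not part of this file): a userspace migration loop
 * could pull the CMMA values collected above via the KVM_S390_GET_CMMA_BITS
 * ioctl on the VM file descriptor. vm_fd and buf are assumptions of the
 * example; migration mode must have been enabled first unless
 * KVM_S390_CMMA_PEEK is used.
 *
 *	uint8_t buf[4096];
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = 0,
 *		.count     = sizeof(buf),
 *		.flags     = 0,			// or KVM_S390_CMMA_PEEK
 *		.values    = (__u64)(uintptr_t)buf,
 *	};
 *	if (ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log) == 0) {
 *		// log.start_gfn is the first gfn described by buf,
 *		// log.count the number of bytes filled in, and
 *		// log.remaining the number of still-dirty pages.
 *	}
 */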
22464036e387SClaudio Imbrenda 
22474036e387SClaudio Imbrenda /*
22484036e387SClaudio Imbrenda  * This function sets the CMMA attributes for the given pages. If the input
22494036e387SClaudio Imbrenda  * buffer has zero length, no action is taken, otherwise the attributes are
2250c9f0a2b8SJanosch Frank  * set and the mm->context.uses_cmm flag is set.
22514036e387SClaudio Imbrenda  */
22524036e387SClaudio Imbrenda static int kvm_s390_set_cmma_bits(struct kvm *kvm,
22534036e387SClaudio Imbrenda 				  const struct kvm_s390_cmma_log *args)
22544036e387SClaudio Imbrenda {
22554036e387SClaudio Imbrenda 	unsigned long hva, mask, pgstev, i;
22564036e387SClaudio Imbrenda 	uint8_t *bits;
22574036e387SClaudio Imbrenda 	int srcu_idx, r = 0;
22584036e387SClaudio Imbrenda 
22594036e387SClaudio Imbrenda 	mask = args->mask;
22604036e387SClaudio Imbrenda 
22614036e387SClaudio Imbrenda 	if (!kvm->arch.use_cmma)
22624036e387SClaudio Imbrenda 		return -ENXIO;
22634036e387SClaudio Imbrenda 	/* invalid/unsupported flags */
22644036e387SClaudio Imbrenda 	if (args->flags != 0)
22654036e387SClaudio Imbrenda 		return -EINVAL;
22664036e387SClaudio Imbrenda 	/* Enforce sane limit on memory allocation */
22674036e387SClaudio Imbrenda 	if (args->count > KVM_S390_CMMA_SIZE_MAX)
22684036e387SClaudio Imbrenda 		return -EINVAL;
22694036e387SClaudio Imbrenda 	/* Nothing to do */
22704036e387SClaudio Imbrenda 	if (args->count == 0)
22714036e387SClaudio Imbrenda 		return 0;
22724036e387SClaudio Imbrenda 
227342bc47b3SKees Cook 	bits = vmalloc(array_size(sizeof(*bits), args->count));
22744036e387SClaudio Imbrenda 	if (!bits)
22754036e387SClaudio Imbrenda 		return -ENOMEM;
22764036e387SClaudio Imbrenda 
22774036e387SClaudio Imbrenda 	r = copy_from_user(bits, (void __user *)args->values, args->count);
22784036e387SClaudio Imbrenda 	if (r) {
22794036e387SClaudio Imbrenda 		r = -EFAULT;
22804036e387SClaudio Imbrenda 		goto out;
22814036e387SClaudio Imbrenda 	}
22824036e387SClaudio Imbrenda 
2283d8ed45c5SMichel Lespinasse 	mmap_read_lock(kvm->mm);
22844036e387SClaudio Imbrenda 	srcu_idx = srcu_read_lock(&kvm->srcu);
22854036e387SClaudio Imbrenda 	for (i = 0; i < args->count; i++) {
22864036e387SClaudio Imbrenda 		hva = gfn_to_hva(kvm, args->start_gfn + i);
22874036e387SClaudio Imbrenda 		if (kvm_is_error_hva(hva)) {
22884036e387SClaudio Imbrenda 			r = -EFAULT;
22894036e387SClaudio Imbrenda 			break;
22904036e387SClaudio Imbrenda 		}
22914036e387SClaudio Imbrenda 
22924036e387SClaudio Imbrenda 		pgstev = bits[i];
22934036e387SClaudio Imbrenda 		pgstev = pgstev << 24;
22941bab1c02SClaudio Imbrenda 		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
22954036e387SClaudio Imbrenda 		set_pgste_bits(kvm->mm, hva, mask, pgstev);
22964036e387SClaudio Imbrenda 	}
22974036e387SClaudio Imbrenda 	srcu_read_unlock(&kvm->srcu, srcu_idx);
2298d8ed45c5SMichel Lespinasse 	mmap_read_unlock(kvm->mm);
22994036e387SClaudio Imbrenda 
2300c9f0a2b8SJanosch Frank 	if (!kvm->mm->context.uses_cmm) {
2301d8ed45c5SMichel Lespinasse 		mmap_write_lock(kvm->mm);
2302c9f0a2b8SJanosch Frank 		kvm->mm->context.uses_cmm = 1;
2303d8ed45c5SMichel Lespinasse 		mmap_write_unlock(kvm->mm);
23044036e387SClaudio Imbrenda 	}
23054036e387SClaudio Imbrenda out:
23064036e387SClaudio Imbrenda 	vfree(bits);
23074036e387SClaudio Imbrenda 	return r;
23084036e387SClaudio Imbrenda }
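
/*
 * Illustrative sketch (not part of this file): on the migration target the
 * same attribute bytes are replayed with KVM_S390_SET_CMMA_BITS; mask
 * selects which PGSTE bits may be modified (KVM filters it further, see
 * above). vm_fd, buf and cnt are assumptions of the example.
 *
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = 0,
 *		.count     = cnt,
 *		.flags     = 0,
 *		.mask      = ~0ULL,
 *		.values    = (__u64)(uintptr_t)buf,
 *	};
 *	int rc = ioctl(vm_fd, KVM_S390_SET_CMMA_BITS, &log);
 */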
23094036e387SClaudio Imbrenda 
231029b40f10SJanosch Frank static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp)
231129b40f10SJanosch Frank {
231229b40f10SJanosch Frank 	struct kvm_vcpu *vcpu;
231329b40f10SJanosch Frank 	u16 rc, rrc;
231429b40f10SJanosch Frank 	int ret = 0;
231529b40f10SJanosch Frank 	int i;
231629b40f10SJanosch Frank 
231729b40f10SJanosch Frank 	/*
231829b40f10SJanosch Frank 	 * We ignore failures and try to destroy as many CPUs as possible.
231929b40f10SJanosch Frank 	 * At the same time we must not free the assigned resources when
232029b40f10SJanosch Frank 	 * this fails, as the ultravisor still has access to that memory.
232129b40f10SJanosch Frank 	 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
232229b40f10SJanosch Frank 	 * behind.
232329b40f10SJanosch Frank 	 * We want to return the first failure rc and rrc, though.
232429b40f10SJanosch Frank 	 */
232529b40f10SJanosch Frank 	kvm_for_each_vcpu(i, vcpu, kvm) {
232629b40f10SJanosch Frank 		mutex_lock(&vcpu->mutex);
232729b40f10SJanosch Frank 		if (kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc) && !ret) {
232829b40f10SJanosch Frank 			*rcp = rc;
232929b40f10SJanosch Frank 			*rrcp = rrc;
233029b40f10SJanosch Frank 			ret = -EIO;
233129b40f10SJanosch Frank 		}
233229b40f10SJanosch Frank 		mutex_unlock(&vcpu->mutex);
233329b40f10SJanosch Frank 	}
233429b40f10SJanosch Frank 	return ret;
233529b40f10SJanosch Frank }
233629b40f10SJanosch Frank 
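/*
 * Convert all existing vcpus of the VM to protected mode. If any conversion
 * fails, the vcpus that were already converted are rolled back to
 * non-protected mode before returning the error.
 */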
233729b40f10SJanosch Frank static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
233829b40f10SJanosch Frank {
233929b40f10SJanosch Frank 	int i, r = 0;
234029b40f10SJanosch Frank 	u16 dummy;
234129b40f10SJanosch Frank 
234229b40f10SJanosch Frank 	struct kvm_vcpu *vcpu;
234329b40f10SJanosch Frank 
234429b40f10SJanosch Frank 	kvm_for_each_vcpu(i, vcpu, kvm) {
234529b40f10SJanosch Frank 		mutex_lock(&vcpu->mutex);
234629b40f10SJanosch Frank 		r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
234729b40f10SJanosch Frank 		mutex_unlock(&vcpu->mutex);
234829b40f10SJanosch Frank 		if (r)
234929b40f10SJanosch Frank 			break;
235029b40f10SJanosch Frank 	}
235129b40f10SJanosch Frank 	if (r)
235229b40f10SJanosch Frank 		kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
235329b40f10SJanosch Frank 	return r;
235429b40f10SJanosch Frank }
235529b40f10SJanosch Frank 
235629b40f10SJanosch Frank static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
235729b40f10SJanosch Frank {
235829b40f10SJanosch Frank 	int r = 0;
235929b40f10SJanosch Frank 	u16 dummy;
236029b40f10SJanosch Frank 	void __user *argp = (void __user *)cmd->data;
236129b40f10SJanosch Frank 
236229b40f10SJanosch Frank 	switch (cmd->cmd) {
236329b40f10SJanosch Frank 	case KVM_PV_ENABLE: {
236429b40f10SJanosch Frank 		r = -EINVAL;
236529b40f10SJanosch Frank 		if (kvm_s390_pv_is_protected(kvm))
236629b40f10SJanosch Frank 			break;
236729b40f10SJanosch Frank 
236829b40f10SJanosch Frank 		/*
236929b40f10SJanosch Frank 		 *  FMT 4 SIE needs esca. As we never switch back to bsca from
237029b40f10SJanosch Frank 		 *  esca, we need no cleanup in the error cases below
237129b40f10SJanosch Frank 		 */
237229b40f10SJanosch Frank 		r = sca_switch_to_extended(kvm);
237329b40f10SJanosch Frank 		if (r)
237429b40f10SJanosch Frank 			break;
237529b40f10SJanosch Frank 
2376d8ed45c5SMichel Lespinasse 		mmap_write_lock(current->mm);
2377fa0c5eabSJanosch Frank 		r = gmap_mark_unmergeable();
2378d8ed45c5SMichel Lespinasse 		mmap_write_unlock(current->mm);
2379fa0c5eabSJanosch Frank 		if (r)
2380fa0c5eabSJanosch Frank 			break;
2381fa0c5eabSJanosch Frank 
238229b40f10SJanosch Frank 		r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
238329b40f10SJanosch Frank 		if (r)
238429b40f10SJanosch Frank 			break;
238529b40f10SJanosch Frank 
238629b40f10SJanosch Frank 		r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
238729b40f10SJanosch Frank 		if (r)
238829b40f10SJanosch Frank 			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
23890890ddeaSChristian Borntraeger 
23900890ddeaSChristian Borntraeger 		/* we need to block service interrupts from now on */
23910890ddeaSChristian Borntraeger 		set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
239229b40f10SJanosch Frank 		break;
239329b40f10SJanosch Frank 	}
239429b40f10SJanosch Frank 	case KVM_PV_DISABLE: {
239529b40f10SJanosch Frank 		r = -EINVAL;
239629b40f10SJanosch Frank 		if (!kvm_s390_pv_is_protected(kvm))
239729b40f10SJanosch Frank 			break;
239829b40f10SJanosch Frank 
239929b40f10SJanosch Frank 		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
240029b40f10SJanosch Frank 		/*
240129b40f10SJanosch Frank 		 * If a CPU could not be destroyed, destroy VM will also fail.
240229b40f10SJanosch Frank 		 * There is no point in trying to destroy it. Instead return
240329b40f10SJanosch Frank 		 * the rc and rrc from the first CPU that failed destroying.
240429b40f10SJanosch Frank 		 */
240529b40f10SJanosch Frank 		if (r)
240629b40f10SJanosch Frank 			break;
240729b40f10SJanosch Frank 		r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc);
24080890ddeaSChristian Borntraeger 
24090890ddeaSChristian Borntraeger 		/* no need to block service interrupts any more */
24100890ddeaSChristian Borntraeger 		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
241129b40f10SJanosch Frank 		break;
241229b40f10SJanosch Frank 	}
241329b40f10SJanosch Frank 	case KVM_PV_SET_SEC_PARMS: {
241429b40f10SJanosch Frank 		struct kvm_s390_pv_sec_parm parms = {};
241529b40f10SJanosch Frank 		void *hdr;
241629b40f10SJanosch Frank 
241729b40f10SJanosch Frank 		r = -EINVAL;
241829b40f10SJanosch Frank 		if (!kvm_s390_pv_is_protected(kvm))
241929b40f10SJanosch Frank 			break;
242029b40f10SJanosch Frank 
242129b40f10SJanosch Frank 		r = -EFAULT;
242229b40f10SJanosch Frank 		if (copy_from_user(&parms, argp, sizeof(parms)))
242329b40f10SJanosch Frank 			break;
242429b40f10SJanosch Frank 
242529b40f10SJanosch Frank 		/* Currently restricted to 8KB */
242629b40f10SJanosch Frank 		r = -EINVAL;
242729b40f10SJanosch Frank 		if (parms.length > PAGE_SIZE * 2)
242829b40f10SJanosch Frank 			break;
242929b40f10SJanosch Frank 
243029b40f10SJanosch Frank 		r = -ENOMEM;
243129b40f10SJanosch Frank 		hdr = vmalloc(parms.length);
243229b40f10SJanosch Frank 		if (!hdr)
243329b40f10SJanosch Frank 			break;
243429b40f10SJanosch Frank 
243529b40f10SJanosch Frank 		r = -EFAULT;
243629b40f10SJanosch Frank 		if (!copy_from_user(hdr, (void __user *)parms.origin,
243729b40f10SJanosch Frank 				    parms.length))
243829b40f10SJanosch Frank 			r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
243929b40f10SJanosch Frank 						      &cmd->rc, &cmd->rrc);
244029b40f10SJanosch Frank 
244129b40f10SJanosch Frank 		vfree(hdr);
244229b40f10SJanosch Frank 		break;
244329b40f10SJanosch Frank 	}
244429b40f10SJanosch Frank 	case KVM_PV_UNPACK: {
244529b40f10SJanosch Frank 		struct kvm_s390_pv_unp unp = {};
244629b40f10SJanosch Frank 
244729b40f10SJanosch Frank 		r = -EINVAL;
24481ed576a2SJanosch Frank 		if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
244929b40f10SJanosch Frank 			break;
245029b40f10SJanosch Frank 
245129b40f10SJanosch Frank 		r = -EFAULT;
245229b40f10SJanosch Frank 		if (copy_from_user(&unp, argp, sizeof(unp)))
245329b40f10SJanosch Frank 			break;
245429b40f10SJanosch Frank 
245529b40f10SJanosch Frank 		r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
245629b40f10SJanosch Frank 				       &cmd->rc, &cmd->rrc);
245729b40f10SJanosch Frank 		break;
245829b40f10SJanosch Frank 	}
245929b40f10SJanosch Frank 	case KVM_PV_VERIFY: {
246029b40f10SJanosch Frank 		r = -EINVAL;
246129b40f10SJanosch Frank 		if (!kvm_s390_pv_is_protected(kvm))
246229b40f10SJanosch Frank 			break;
246329b40f10SJanosch Frank 
246429b40f10SJanosch Frank 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
246529b40f10SJanosch Frank 				  UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
246629b40f10SJanosch Frank 		KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
246729b40f10SJanosch Frank 			     cmd->rrc);
246829b40f10SJanosch Frank 		break;
246929b40f10SJanosch Frank 	}
2470e0d2773dSJanosch Frank 	case KVM_PV_PREP_RESET: {
2471e0d2773dSJanosch Frank 		r = -EINVAL;
2472e0d2773dSJanosch Frank 		if (!kvm_s390_pv_is_protected(kvm))
2473e0d2773dSJanosch Frank 			break;
2474e0d2773dSJanosch Frank 
2475e0d2773dSJanosch Frank 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2476e0d2773dSJanosch Frank 				  UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
2477e0d2773dSJanosch Frank 		KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
2478e0d2773dSJanosch Frank 			     cmd->rc, cmd->rrc);
2479e0d2773dSJanosch Frank 		break;
2480e0d2773dSJanosch Frank 	}
2481e0d2773dSJanosch Frank 	case KVM_PV_UNSHARE_ALL: {
2482e0d2773dSJanosch Frank 		r = -EINVAL;
2483e0d2773dSJanosch Frank 		if (!kvm_s390_pv_is_protected(kvm))
2484e0d2773dSJanosch Frank 			break;
2485e0d2773dSJanosch Frank 
2486e0d2773dSJanosch Frank 		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2487e0d2773dSJanosch Frank 				  UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
2488e0d2773dSJanosch Frank 		KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
2489e0d2773dSJanosch Frank 			     cmd->rc, cmd->rrc);
2490e0d2773dSJanosch Frank 		break;
2491e0d2773dSJanosch Frank 	}
249229b40f10SJanosch Frank 	default:
249329b40f10SJanosch Frank 		r = -ENOTTY;
249429b40f10SJanosch Frank 	}
249529b40f10SJanosch Frank 	return r;
249629b40f10SJanosch Frank }
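
/*
 * Illustrative sketch (not part of this file): userspace drives the
 * protected-VM life cycle above through KVM_S390_PV_COMMAND on the VM file
 * descriptor, typically KVM_PV_ENABLE, then KVM_PV_SET_SEC_PARMS with the
 * SE header, KVM_PV_UNPACK per image chunk and finally KVM_PV_VERIFY.
 * vm_fd, hdr_addr and hdr_len are assumptions of the example.
 *
 *	struct kvm_s390_pv_sec_parm parm = { .origin = hdr_addr, .length = hdr_len };
 *	struct kvm_pv_cmd cmd = {
 *		.cmd  = KVM_PV_SET_SEC_PARMS,
 *		.data = (__u64)(uintptr_t)&parm,
 *	};
 *	if (ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd))
 *		fprintf(stderr, "rc 0x%x rrc 0x%x\n", cmd.rc, cmd.rrc);
 */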
249729b40f10SJanosch Frank 
2498b0c632dbSHeiko Carstens long kvm_arch_vm_ioctl(struct file *filp,
2499b0c632dbSHeiko Carstens 		       unsigned int ioctl, unsigned long arg)
2500b0c632dbSHeiko Carstens {
2501b0c632dbSHeiko Carstens 	struct kvm *kvm = filp->private_data;
2502b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
2503f2061656SDominik Dingel 	struct kvm_device_attr attr;
2504b0c632dbSHeiko Carstens 	int r;
2505b0c632dbSHeiko Carstens 
2506b0c632dbSHeiko Carstens 	switch (ioctl) {
2507ba5c1e9bSCarsten Otte 	case KVM_S390_INTERRUPT: {
2508ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
2509ba5c1e9bSCarsten Otte 
2510ba5c1e9bSCarsten Otte 		r = -EFAULT;
2511ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
2512ba5c1e9bSCarsten Otte 			break;
2513ba5c1e9bSCarsten Otte 		r = kvm_s390_inject_vm(kvm, &s390int);
2514ba5c1e9bSCarsten Otte 		break;
2515ba5c1e9bSCarsten Otte 	}
251684223598SCornelia Huck 	case KVM_CREATE_IRQCHIP: {
251784223598SCornelia Huck 		struct kvm_irq_routing_entry routing;
251884223598SCornelia Huck 
251984223598SCornelia Huck 		r = -EINVAL;
252084223598SCornelia Huck 		if (kvm->arch.use_irqchip) {
252184223598SCornelia Huck 			/* Set up dummy routing. */
252284223598SCornelia Huck 			memset(&routing, 0, sizeof(routing));
2523152b2839SNicholas Krause 			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
252484223598SCornelia Huck 		}
252584223598SCornelia Huck 		break;
252684223598SCornelia Huck 	}
2527f2061656SDominik Dingel 	case KVM_SET_DEVICE_ATTR: {
2528f2061656SDominik Dingel 		r = -EFAULT;
2529f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2530f2061656SDominik Dingel 			break;
2531f2061656SDominik Dingel 		r = kvm_s390_vm_set_attr(kvm, &attr);
2532f2061656SDominik Dingel 		break;
2533f2061656SDominik Dingel 	}
2534f2061656SDominik Dingel 	case KVM_GET_DEVICE_ATTR: {
2535f2061656SDominik Dingel 		r = -EFAULT;
2536f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2537f2061656SDominik Dingel 			break;
2538f2061656SDominik Dingel 		r = kvm_s390_vm_get_attr(kvm, &attr);
2539f2061656SDominik Dingel 		break;
2540f2061656SDominik Dingel 	}
2541f2061656SDominik Dingel 	case KVM_HAS_DEVICE_ATTR: {
2542f2061656SDominik Dingel 		r = -EFAULT;
2543f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2544f2061656SDominik Dingel 			break;
2545f2061656SDominik Dingel 		r = kvm_s390_vm_has_attr(kvm, &attr);
2546f2061656SDominik Dingel 		break;
2547f2061656SDominik Dingel 	}
254830ee2a98SJason J. Herne 	case KVM_S390_GET_SKEYS: {
254930ee2a98SJason J. Herne 		struct kvm_s390_skeys args;
255030ee2a98SJason J. Herne 
255130ee2a98SJason J. Herne 		r = -EFAULT;
255230ee2a98SJason J. Herne 		if (copy_from_user(&args, argp,
255330ee2a98SJason J. Herne 				   sizeof(struct kvm_s390_skeys)))
255430ee2a98SJason J. Herne 			break;
255530ee2a98SJason J. Herne 		r = kvm_s390_get_skeys(kvm, &args);
255630ee2a98SJason J. Herne 		break;
255730ee2a98SJason J. Herne 	}
255830ee2a98SJason J. Herne 	case KVM_S390_SET_SKEYS: {
255930ee2a98SJason J. Herne 		struct kvm_s390_skeys args;
256030ee2a98SJason J. Herne 
256130ee2a98SJason J. Herne 		r = -EFAULT;
256230ee2a98SJason J. Herne 		if (copy_from_user(&args, argp,
256330ee2a98SJason J. Herne 				   sizeof(struct kvm_s390_skeys)))
256430ee2a98SJason J. Herne 			break;
256530ee2a98SJason J. Herne 		r = kvm_s390_set_skeys(kvm, &args);
256630ee2a98SJason J. Herne 		break;
256730ee2a98SJason J. Herne 	}
25684036e387SClaudio Imbrenda 	case KVM_S390_GET_CMMA_BITS: {
25694036e387SClaudio Imbrenda 		struct kvm_s390_cmma_log args;
25704036e387SClaudio Imbrenda 
25714036e387SClaudio Imbrenda 		r = -EFAULT;
25724036e387SClaudio Imbrenda 		if (copy_from_user(&args, argp, sizeof(args)))
25734036e387SClaudio Imbrenda 			break;
25741de1ea7eSChristian Borntraeger 		mutex_lock(&kvm->slots_lock);
25754036e387SClaudio Imbrenda 		r = kvm_s390_get_cmma_bits(kvm, &args);
25761de1ea7eSChristian Borntraeger 		mutex_unlock(&kvm->slots_lock);
25774036e387SClaudio Imbrenda 		if (!r) {
25784036e387SClaudio Imbrenda 			r = copy_to_user(argp, &args, sizeof(args));
25794036e387SClaudio Imbrenda 			if (r)
25804036e387SClaudio Imbrenda 				r = -EFAULT;
25814036e387SClaudio Imbrenda 		}
25824036e387SClaudio Imbrenda 		break;
25834036e387SClaudio Imbrenda 	}
25844036e387SClaudio Imbrenda 	case KVM_S390_SET_CMMA_BITS: {
25854036e387SClaudio Imbrenda 		struct kvm_s390_cmma_log args;
25864036e387SClaudio Imbrenda 
25874036e387SClaudio Imbrenda 		r = -EFAULT;
25884036e387SClaudio Imbrenda 		if (copy_from_user(&args, argp, sizeof(args)))
25894036e387SClaudio Imbrenda 			break;
25901de1ea7eSChristian Borntraeger 		mutex_lock(&kvm->slots_lock);
25914036e387SClaudio Imbrenda 		r = kvm_s390_set_cmma_bits(kvm, &args);
25921de1ea7eSChristian Borntraeger 		mutex_unlock(&kvm->slots_lock);
25934036e387SClaudio Imbrenda 		break;
25944036e387SClaudio Imbrenda 	}
259529b40f10SJanosch Frank 	case KVM_S390_PV_COMMAND: {
259629b40f10SJanosch Frank 		struct kvm_pv_cmd args;
259729b40f10SJanosch Frank 
2598fe28c786SJanosch Frank 		/* protvirt means user sigp */
2599fe28c786SJanosch Frank 		kvm->arch.user_cpu_state_ctrl = 1;
260029b40f10SJanosch Frank 		r = 0;
260129b40f10SJanosch Frank 		if (!is_prot_virt_host()) {
260229b40f10SJanosch Frank 			r = -EINVAL;
260329b40f10SJanosch Frank 			break;
260429b40f10SJanosch Frank 		}
260529b40f10SJanosch Frank 		if (copy_from_user(&args, argp, sizeof(args))) {
260629b40f10SJanosch Frank 			r = -EFAULT;
260729b40f10SJanosch Frank 			break;
260829b40f10SJanosch Frank 		}
260929b40f10SJanosch Frank 		if (args.flags) {
261029b40f10SJanosch Frank 			r = -EINVAL;
261129b40f10SJanosch Frank 			break;
261229b40f10SJanosch Frank 		}
261329b40f10SJanosch Frank 		mutex_lock(&kvm->lock);
261429b40f10SJanosch Frank 		r = kvm_s390_handle_pv(kvm, &args);
261529b40f10SJanosch Frank 		mutex_unlock(&kvm->lock);
261629b40f10SJanosch Frank 		if (copy_to_user(argp, &args, sizeof(args))) {
261729b40f10SJanosch Frank 			r = -EFAULT;
261829b40f10SJanosch Frank 			break;
261929b40f10SJanosch Frank 		}
262029b40f10SJanosch Frank 		break;
262129b40f10SJanosch Frank 	}
2622b0c632dbSHeiko Carstens 	default:
2623367e1319SAvi Kivity 		r = -ENOTTY;
2624b0c632dbSHeiko Carstens 	}
2625b0c632dbSHeiko Carstens 
2626b0c632dbSHeiko Carstens 	return r;
2627b0c632dbSHeiko Carstens }
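
/*
 * Illustrative sketch (not part of this file): the KVM_*_DEVICE_ATTR ioctls
 * handled above take a struct kvm_device_attr. For example, a VMM could
 * probe for and then enable migration mode, which is a prerequisite for the
 * dirty-CMMA tracking used by KVM_S390_GET_CMMA_BITS. vm_fd and the
 * migration attribute names from the s390 uapi header are assumptions of
 * the example.
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MIGRATION,
 *		.attr  = KVM_S390_VM_MIGRATION_START,
 *	};
 *	if (ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0)
 *		ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */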
2628b0c632dbSHeiko Carstens 
262945c9b47cSTony Krowiak static int kvm_s390_apxa_installed(void)
263045c9b47cSTony Krowiak {
2631e585b24aSTony Krowiak 	struct ap_config_info info;
263245c9b47cSTony Krowiak 
2633e585b24aSTony Krowiak 	if (ap_instructions_available()) {
2634e585b24aSTony Krowiak 		if (ap_qci(&info) == 0)
2635e585b24aSTony Krowiak 			return info.apxa;
263645c9b47cSTony Krowiak 	}
263745c9b47cSTony Krowiak 
263845c9b47cSTony Krowiak 	return 0;
263945c9b47cSTony Krowiak }
264045c9b47cSTony Krowiak 
2641e585b24aSTony Krowiak /*
2642e585b24aSTony Krowiak  * The format of the crypto control block (CRYCB) is specified in the 3 low
2643e585b24aSTony Krowiak  * order bits of the CRYCB designation (CRYCBD) field as follows:
2644e585b24aSTony Krowiak  * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
2645e585b24aSTony Krowiak  *	     AP extended addressing (APXA) facility are installed.
2646e585b24aSTony Krowiak  * Format 1: The APXA facility is not installed but the MSAX3 facility is.
2647e585b24aSTony Krowiak  * Format 2: Both the APXA and MSAX3 facilities are installed.
2648e585b24aSTony Krowiak  */
264945c9b47cSTony Krowiak static void kvm_s390_set_crycb_format(struct kvm *kvm)
265045c9b47cSTony Krowiak {
265145c9b47cSTony Krowiak 	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
265245c9b47cSTony Krowiak 
2653e585b24aSTony Krowiak 	/* Clear the CRYCB format bits - i.e., set format 0 by default */
2654e585b24aSTony Krowiak 	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
2655e585b24aSTony Krowiak 
2656e585b24aSTony Krowiak 	/* Check whether MSAX3 is installed */
2657e585b24aSTony Krowiak 	if (!test_kvm_facility(kvm, 76))
2658e585b24aSTony Krowiak 		return;
2659e585b24aSTony Krowiak 
266045c9b47cSTony Krowiak 	if (kvm_s390_apxa_installed())
266145c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
266245c9b47cSTony Krowiak 	else
266345c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
266445c9b47cSTony Krowiak }
266545c9b47cSTony Krowiak 
26660e237e44SPierre Morel void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
26670e237e44SPierre Morel 			       unsigned long *aqm, unsigned long *adm)
26680e237e44SPierre Morel {
26690e237e44SPierre Morel 	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
26700e237e44SPierre Morel 
26710e237e44SPierre Morel 	mutex_lock(&kvm->lock);
26720e237e44SPierre Morel 	kvm_s390_vcpu_block_all(kvm);
26730e237e44SPierre Morel 
26740e237e44SPierre Morel 	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
26750e237e44SPierre Morel 	case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
26760e237e44SPierre Morel 		memcpy(crycb->apcb1.apm, apm, 32);
26770e237e44SPierre Morel 		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
26780e237e44SPierre Morel 			 apm[0], apm[1], apm[2], apm[3]);
26790e237e44SPierre Morel 		memcpy(crycb->apcb1.aqm, aqm, 32);
26800e237e44SPierre Morel 		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
26810e237e44SPierre Morel 			 aqm[0], aqm[1], aqm[2], aqm[3]);
26820e237e44SPierre Morel 		memcpy(crycb->apcb1.adm, adm, 32);
26830e237e44SPierre Morel 		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
26840e237e44SPierre Morel 			 adm[0], adm[1], adm[2], adm[3]);
26850e237e44SPierre Morel 		break;
26860e237e44SPierre Morel 	case CRYCB_FORMAT1:
26870e237e44SPierre Morel 	case CRYCB_FORMAT0: /* Fall through - both use APCB0 */
26880e237e44SPierre Morel 		memcpy(crycb->apcb0.apm, apm, 8);
26890e237e44SPierre Morel 		memcpy(crycb->apcb0.aqm, aqm, 2);
26900e237e44SPierre Morel 		memcpy(crycb->apcb0.adm, adm, 2);
26910e237e44SPierre Morel 		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
26920e237e44SPierre Morel 			 apm[0], *((unsigned short *)aqm),
26930e237e44SPierre Morel 			 *((unsigned short *)adm));
26940e237e44SPierre Morel 		break;
26950e237e44SPierre Morel 	default:	/* Cannot happen */
26960e237e44SPierre Morel 		break;
26970e237e44SPierre Morel 	}
26980e237e44SPierre Morel 
26990e237e44SPierre Morel 	/* recreate the shadow crycb for each vcpu */
27000e237e44SPierre Morel 	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
27010e237e44SPierre Morel 	kvm_s390_vcpu_unblock_all(kvm);
27020e237e44SPierre Morel 	mutex_unlock(&kvm->lock);
27030e237e44SPierre Morel }
27040e237e44SPierre Morel EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
27050e237e44SPierre Morel 
270642104598STony Krowiak void kvm_arch_crypto_clear_masks(struct kvm *kvm)
270742104598STony Krowiak {
270842104598STony Krowiak 	mutex_lock(&kvm->lock);
270942104598STony Krowiak 	kvm_s390_vcpu_block_all(kvm);
271042104598STony Krowiak 
271142104598STony Krowiak 	memset(&kvm->arch.crypto.crycb->apcb0, 0,
271242104598STony Krowiak 	       sizeof(kvm->arch.crypto.crycb->apcb0));
271342104598STony Krowiak 	memset(&kvm->arch.crypto.crycb->apcb1, 0,
271442104598STony Krowiak 	       sizeof(kvm->arch.crypto.crycb->apcb1));
271542104598STony Krowiak 
27160e237e44SPierre Morel 	VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
27176cc571b1SPierre Morel 	/* recreate the shadow crycb for each vcpu */
27186cc571b1SPierre Morel 	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
271942104598STony Krowiak 	kvm_s390_vcpu_unblock_all(kvm);
272042104598STony Krowiak 	mutex_unlock(&kvm->lock);
272142104598STony Krowiak }
272242104598STony Krowiak EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
272342104598STony Krowiak 
27249bb0ec09SDavid Hildenbrand static u64 kvm_s390_get_initial_cpuid(void)
27259d8d5786SMichael Mueller {
27269bb0ec09SDavid Hildenbrand 	struct cpuid cpuid;
27279bb0ec09SDavid Hildenbrand 
27289bb0ec09SDavid Hildenbrand 	get_cpu_id(&cpuid);
27299bb0ec09SDavid Hildenbrand 	cpuid.version = 0xff;
27309bb0ec09SDavid Hildenbrand 	return *((u64 *) &cpuid);
27319d8d5786SMichael Mueller }
27329d8d5786SMichael Mueller 
2733c54f0d6aSDavid Hildenbrand static void kvm_s390_crypto_init(struct kvm *kvm)
27345102ee87STony Krowiak {
2735c54f0d6aSDavid Hildenbrand 	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
273645c9b47cSTony Krowiak 	kvm_s390_set_crycb_format(kvm);
27375102ee87STony Krowiak 
2738e585b24aSTony Krowiak 	if (!test_kvm_facility(kvm, 76))
2739e585b24aSTony Krowiak 		return;
2740e585b24aSTony Krowiak 
2741ed6f76b4STony Krowiak 	/* Enable AES/DEA protected key functions by default */
2742ed6f76b4STony Krowiak 	kvm->arch.crypto.aes_kw = 1;
2743ed6f76b4STony Krowiak 	kvm->arch.crypto.dea_kw = 1;
2744ed6f76b4STony Krowiak 	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
2745ed6f76b4STony Krowiak 			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
2746ed6f76b4STony Krowiak 	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
2747ed6f76b4STony Krowiak 			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
27485102ee87STony Krowiak }
27495102ee87STony Krowiak 
27507d43bafcSEugene (jno) Dvurechenski static void sca_dispose(struct kvm *kvm)
27517d43bafcSEugene (jno) Dvurechenski {
27527d43bafcSEugene (jno) Dvurechenski 	if (kvm->arch.use_esca)
27535e044315SEugene (jno) Dvurechenski 		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
27547d43bafcSEugene (jno) Dvurechenski 	else
27557d43bafcSEugene (jno) Dvurechenski 		free_page((unsigned long)(kvm->arch.sca));
27567d43bafcSEugene (jno) Dvurechenski 	kvm->arch.sca = NULL;
27577d43bafcSEugene (jno) Dvurechenski }
27587d43bafcSEugene (jno) Dvurechenski 
2759e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
2760b0c632dbSHeiko Carstens {
2761c4196218SChristian Borntraeger 	gfp_t alloc_flags = GFP_KERNEL_ACCOUNT;
27629d8d5786SMichael Mueller 	int i, rc;
2763b0c632dbSHeiko Carstens 	char debug_name[16];
2764f6c137ffSChristian Borntraeger 	static unsigned long sca_offset;
2765b0c632dbSHeiko Carstens 
2766e08b9637SCarsten Otte 	rc = -EINVAL;
2767e08b9637SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
2768e08b9637SCarsten Otte 	if (type & ~KVM_VM_S390_UCONTROL)
2769e08b9637SCarsten Otte 		goto out_err;
2770e08b9637SCarsten Otte 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
2771e08b9637SCarsten Otte 		goto out_err;
2772e08b9637SCarsten Otte #else
2773e08b9637SCarsten Otte 	if (type)
2774e08b9637SCarsten Otte 		goto out_err;
2775e08b9637SCarsten Otte #endif
2776e08b9637SCarsten Otte 
2777b0c632dbSHeiko Carstens 	rc = s390_enable_sie();
2778b0c632dbSHeiko Carstens 	if (rc)
2779d89f5effSJan Kiszka 		goto out_err;
2780b0c632dbSHeiko Carstens 
2781b290411aSCarsten Otte 	rc = -ENOMEM;
2782b290411aSCarsten Otte 
278376a6dd72SDavid Hildenbrand 	if (!sclp.has_64bscao)
278476a6dd72SDavid Hildenbrand 		alloc_flags |= GFP_DMA;
27855e044315SEugene (jno) Dvurechenski 	rwlock_init(&kvm->arch.sca_lock);
27869ac96d75SDavid Hildenbrand 	/* start with basic SCA */
278776a6dd72SDavid Hildenbrand 	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
2788b0c632dbSHeiko Carstens 	if (!kvm->arch.sca)
2789d89f5effSJan Kiszka 		goto out_err;
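	/*
	 * A basic SCA is smaller than a page. sca_offset is static, so
	 * successive VMs place their basic SCA at increasing 16-byte offsets
	 * within their own freshly allocated page, wrapping back to offset 0
	 * once a whole bsca_block would no longer fit.
	 */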
27900d9ce162SJunaid Shahid 	mutex_lock(&kvm_lock);
2791c5c2c393SDavid Hildenbrand 	sca_offset += 16;
2792bc784cceSEugene (jno) Dvurechenski 	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
2793c5c2c393SDavid Hildenbrand 		sca_offset = 0;
2794bc784cceSEugene (jno) Dvurechenski 	kvm->arch.sca = (struct bsca_block *)
2795bc784cceSEugene (jno) Dvurechenski 			((char *) kvm->arch.sca + sca_offset);
27960d9ce162SJunaid Shahid 	mutex_unlock(&kvm_lock);
2797b0c632dbSHeiko Carstens 
2798b0c632dbSHeiko Carstens 	sprintf(debug_name, "kvm-%u", current->pid);
2799b0c632dbSHeiko Carstens 
28001cb9cf72SChristian Borntraeger 	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
2801b0c632dbSHeiko Carstens 	if (!kvm->arch.dbf)
280240f5b735SDominik Dingel 		goto out_err;
2803b0c632dbSHeiko Carstens 
280419114bebSMichael Mueller 	BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
2805c54f0d6aSDavid Hildenbrand 	kvm->arch.sie_page2 =
2806c4196218SChristian Borntraeger 	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
2807c54f0d6aSDavid Hildenbrand 	if (!kvm->arch.sie_page2)
280840f5b735SDominik Dingel 		goto out_err;
28099d8d5786SMichael Mueller 
281025c84dbaSMichael Mueller 	kvm->arch.sie_page2->kvm = kvm;
2811c54f0d6aSDavid Hildenbrand 	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
2812c3b9e3e1SChristian Borntraeger 
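	/*
	 * fac_mask describes the facilities KVM can offer to this guest
	 * (host facilities filtered by what KVM implements), while fac_list
	 * is the set initially presented to the guest and can be replaced by
	 * userspace via the KVM_S390_VM_CPU_PROCESSOR attribute. A facility
	 * is treated as available only when it is set in both lists.
	 */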
2813c3b9e3e1SChristian Borntraeger 	for (i = 0; i < kvm_s390_fac_size(); i++) {
2814c3b9e3e1SChristian Borntraeger 		kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
2815c3b9e3e1SChristian Borntraeger 					      (kvm_s390_fac_base[i] |
2816c3b9e3e1SChristian Borntraeger 					       kvm_s390_fac_ext[i]);
2817c3b9e3e1SChristian Borntraeger 		kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
2818c3b9e3e1SChristian Borntraeger 					      kvm_s390_fac_base[i];
2819c3b9e3e1SChristian Borntraeger 	}
2820346fa2f8SChristian Borntraeger 	kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
2821981467c9SMichael Mueller 
28221935222dSDavid Hildenbrand 	/* we are always in czam mode - even on pre z14 machines */
28231935222dSDavid Hildenbrand 	set_kvm_facility(kvm->arch.model.fac_mask, 138);
28241935222dSDavid Hildenbrand 	set_kvm_facility(kvm->arch.model.fac_list, 138);
28251935222dSDavid Hildenbrand 	/* we emulate STHYI in kvm */
282695ca2cb5SJanosch Frank 	set_kvm_facility(kvm->arch.model.fac_mask, 74);
282795ca2cb5SJanosch Frank 	set_kvm_facility(kvm->arch.model.fac_list, 74);
28281bab1c02SClaudio Imbrenda 	if (MACHINE_HAS_TLB_GUEST) {
28291bab1c02SClaudio Imbrenda 		set_kvm_facility(kvm->arch.model.fac_mask, 147);
28301bab1c02SClaudio Imbrenda 		set_kvm_facility(kvm->arch.model.fac_list, 147);
28311bab1c02SClaudio Imbrenda 	}
283295ca2cb5SJanosch Frank 
283305f31e3bSPierre Morel 	if (css_general_characteristics.aiv && test_facility(65))
283405f31e3bSPierre Morel 		set_kvm_facility(kvm->arch.model.fac_mask, 65);
283505f31e3bSPierre Morel 
28369bb0ec09SDavid Hildenbrand 	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
283737c5f6c8SDavid Hildenbrand 	kvm->arch.model.ibc = sclp.ibc & 0x0fff;
28389d8d5786SMichael Mueller 
2839c54f0d6aSDavid Hildenbrand 	kvm_s390_crypto_init(kvm);
28405102ee87STony Krowiak 
284151978393SFei Li 	mutex_init(&kvm->arch.float_int.ais_lock);
2842ba5c1e9bSCarsten Otte 	spin_lock_init(&kvm->arch.float_int.lock);
28436d3da241SJens Freimann 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
28446d3da241SJens Freimann 		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
28458a242234SHeiko Carstens 	init_waitqueue_head(&kvm->arch.ipte_wq);
2846a6b7e459SThomas Huth 	mutex_init(&kvm->arch.ipte_mutex);
2847ba5c1e9bSCarsten Otte 
2848b0c632dbSHeiko Carstens 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
284978f26131SChristian Borntraeger 	VM_EVENT(kvm, 3, "vm created with type %lu", type);
2850b0c632dbSHeiko Carstens 
2851e08b9637SCarsten Otte 	if (type & KVM_VM_S390_UCONTROL) {
2852e08b9637SCarsten Otte 		kvm->arch.gmap = NULL;
2853a3a92c31SDominik Dingel 		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
2854e08b9637SCarsten Otte 	} else {
285532e6b236SGuenther Hutzl 		if (sclp.hamax == U64_MAX)
2856ee71d16dSMartin Schwidefsky 			kvm->arch.mem_limit = TASK_SIZE_MAX;
285732e6b236SGuenther Hutzl 		else
2858ee71d16dSMartin Schwidefsky 			kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
285932e6b236SGuenther Hutzl 						    sclp.hamax + 1);
28606ea427bbSMartin Schwidefsky 		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
2861598841caSCarsten Otte 		if (!kvm->arch.gmap)
286240f5b735SDominik Dingel 			goto out_err;
28632c70fe44SChristian Borntraeger 		kvm->arch.gmap->private = kvm;
286424eb3a82SDominik Dingel 		kvm->arch.gmap->pfault_enabled = 0;
2865e08b9637SCarsten Otte 	}
2866fa6b7fe9SCornelia Huck 
2867c9f0a2b8SJanosch Frank 	kvm->arch.use_pfmfi = sclp.has_pfmfi;
286855531b74SJanosch Frank 	kvm->arch.use_skf = sclp.has_skey;
28698ad35755SDavid Hildenbrand 	spin_lock_init(&kvm->arch.start_stop_lock);
2870a3508fbeSDavid Hildenbrand 	kvm_s390_vsie_init(kvm);
2871cc674ef2SMichael Mueller 	if (use_gisa)
2872d7c5cb01SMichael Mueller 		kvm_s390_gisa_init(kvm);
28738335713aSChristian Borntraeger 	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
28748ad35755SDavid Hildenbrand 
2875d89f5effSJan Kiszka 	return 0;
2876d89f5effSJan Kiszka out_err:
2877c54f0d6aSDavid Hildenbrand 	free_page((unsigned long)kvm->arch.sie_page2);
287840f5b735SDominik Dingel 	debug_unregister(kvm->arch.dbf);
28797d43bafcSEugene (jno) Dvurechenski 	sca_dispose(kvm);
288078f26131SChristian Borntraeger 	KVM_EVENT(3, "creation of vm failed: %d", rc);
2881d89f5effSJan Kiszka 	return rc;
2882b0c632dbSHeiko Carstens }
2883b0c632dbSHeiko Carstens 
2884d329c035SChristian Borntraeger void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2885d329c035SChristian Borntraeger {
288629b40f10SJanosch Frank 	u16 rc, rrc;
288729b40f10SJanosch Frank 
2888d329c035SChristian Borntraeger 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
2889ade38c31SCornelia Huck 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
289067335e63SChristian Borntraeger 	kvm_s390_clear_local_irqs(vcpu);
28913c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
2892bc784cceSEugene (jno) Dvurechenski 	if (!kvm_is_ucontrol(vcpu->kvm))
2893a6e2f683SEugene (jno) Dvurechenski 		sca_del_vcpu(vcpu);
289427e0393fSCarsten Otte 
289527e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm))
28966ea427bbSMartin Schwidefsky 		gmap_remove(vcpu->arch.gmap);
289727e0393fSCarsten Otte 
2898e6db1d61SDominik Dingel 	if (vcpu->kvm->arch.use_cmma)
2899b31605c1SDominik Dingel 		kvm_s390_vcpu_unsetup_cmma(vcpu);
290129b40f10SJanosch Frank 	/* We cannot hold the vcpu mutex here, we are already dying */
290129b40f10SJanosch Frank 	if (kvm_s390_pv_cpu_get_handle(vcpu))
290229b40f10SJanosch Frank 		kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
2903d329c035SChristian Borntraeger 	free_page((unsigned long)(vcpu->arch.sie_block));
2904d329c035SChristian Borntraeger }
2905d329c035SChristian Borntraeger 
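/*
 * Destroy every vCPU of the VM and drop the references kept in the
 * vcpus[] array. The array and the online_vcpus counter are cleared
 * under kvm->lock so that no stale vCPU pointers remain when
 * kvm_arch_destroy_vm() continues tearing the VM down.
 */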
2906d329c035SChristian Borntraeger static void kvm_free_vcpus(struct kvm *kvm)
2907d329c035SChristian Borntraeger {
2908d329c035SChristian Borntraeger 	unsigned int i;
2909988a2caeSGleb Natapov 	struct kvm_vcpu *vcpu;
2910d329c035SChristian Borntraeger 
2911988a2caeSGleb Natapov 	kvm_for_each_vcpu(i, vcpu, kvm)
29124543bdc0SSean Christopherson 		kvm_vcpu_destroy(vcpu);
2913988a2caeSGleb Natapov 
2914988a2caeSGleb Natapov 	mutex_lock(&kvm->lock);
2915988a2caeSGleb Natapov 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
2916d329c035SChristian Borntraeger 		kvm->vcpus[i] = NULL;
2917988a2caeSGleb Natapov 
2918988a2caeSGleb Natapov 	atomic_set(&kvm->online_vcpus, 0);
2919988a2caeSGleb Natapov 	mutex_unlock(&kvm->lock);
2920d329c035SChristian Borntraeger }
2921d329c035SChristian Borntraeger 
2922b0c632dbSHeiko Carstens void kvm_arch_destroy_vm(struct kvm *kvm)
2923b0c632dbSHeiko Carstens {
292429b40f10SJanosch Frank 	u16 rc, rrc;
292529b40f10SJanosch Frank 
2926d329c035SChristian Borntraeger 	kvm_free_vcpus(kvm);
29277d43bafcSEugene (jno) Dvurechenski 	sca_dispose(kvm);
2928d7c5cb01SMichael Mueller 	kvm_s390_gisa_destroy(kvm);
292929b40f10SJanosch Frank 	/*
293029b40f10SJanosch Frank 	 * We are already at the end of life and kvm->lock is not taken.
293129b40f10SJanosch Frank 	 * This is ok as the file descriptor is closed by now and nobody
293229b40f10SJanosch Frank 	 * can mess with the pv state. To keep lockdep_assert_held from
293329b40f10SJanosch Frank 	 * complaining, we do not use kvm_s390_pv_is_protected.
293429b40f10SJanosch Frank 	 */
293529b40f10SJanosch Frank 	if (kvm_s390_pv_get_handle(kvm))
293629b40f10SJanosch Frank 		kvm_s390_pv_deinit_vm(kvm, &rc, &rrc);
293729b40f10SJanosch Frank 	debug_unregister(kvm->arch.dbf);
2938c54f0d6aSDavid Hildenbrand 	free_page((unsigned long)kvm->arch.sie_page2);
293927e0393fSCarsten Otte 	if (!kvm_is_ucontrol(kvm))
29406ea427bbSMartin Schwidefsky 		gmap_remove(kvm->arch.gmap);
2941841b91c5SCornelia Huck 	kvm_s390_destroy_adapters(kvm);
294267335e63SChristian Borntraeger 	kvm_s390_clear_float_irqs(kvm);
2943a3508fbeSDavid Hildenbrand 	kvm_s390_vsie_destroy(kvm);
29448335713aSChristian Borntraeger 	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
2945b0c632dbSHeiko Carstens }
2946b0c632dbSHeiko Carstens 
2947b0c632dbSHeiko Carstens /* Section: vcpu related */
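/*
 * For user-controlled (ucontrol) VMs every vCPU gets its own gmap that
 * spans the complete address space; the VM itself carries no gmap.
 */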
2948dafd032aSDominik Dingel static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
2949b0c632dbSHeiko Carstens {
29506ea427bbSMartin Schwidefsky 	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
295127e0393fSCarsten Otte 	if (!vcpu->arch.gmap)
295227e0393fSCarsten Otte 		return -ENOMEM;
29532c70fe44SChristian Borntraeger 	vcpu->arch.gmap->private = vcpu->kvm;
2954dafd032aSDominik Dingel 
295527e0393fSCarsten Otte 	return 0;
295627e0393fSCarsten Otte }
295727e0393fSCarsten Otte 
2958a6e2f683SEugene (jno) Dvurechenski static void sca_del_vcpu(struct kvm_vcpu *vcpu)
2959a6e2f683SEugene (jno) Dvurechenski {
2960a6940674SDavid Hildenbrand 	if (!kvm_s390_use_sca_entries())
2961a6940674SDavid Hildenbrand 		return;
29625e044315SEugene (jno) Dvurechenski 	read_lock(&vcpu->kvm->arch.sca_lock);
29637d43bafcSEugene (jno) Dvurechenski 	if (vcpu->kvm->arch.use_esca) {
29647d43bafcSEugene (jno) Dvurechenski 		struct esca_block *sca = vcpu->kvm->arch.sca;
29657d43bafcSEugene (jno) Dvurechenski 
29667d43bafcSEugene (jno) Dvurechenski 		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
29677d43bafcSEugene (jno) Dvurechenski 		sca->cpu[vcpu->vcpu_id].sda = 0;
29687d43bafcSEugene (jno) Dvurechenski 	} else {
2969bc784cceSEugene (jno) Dvurechenski 		struct bsca_block *sca = vcpu->kvm->arch.sca;
2970a6e2f683SEugene (jno) Dvurechenski 
2971a6e2f683SEugene (jno) Dvurechenski 		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
2972a6e2f683SEugene (jno) Dvurechenski 		sca->cpu[vcpu->vcpu_id].sda = 0;
2973a6e2f683SEugene (jno) Dvurechenski 	}
29745e044315SEugene (jno) Dvurechenski 	read_unlock(&vcpu->kvm->arch.sca_lock);
29757d43bafcSEugene (jno) Dvurechenski }
2976a6e2f683SEugene (jno) Dvurechenski 
2977eaa78f34SDavid Hildenbrand static void sca_add_vcpu(struct kvm_vcpu *vcpu)
2978a6e2f683SEugene (jno) Dvurechenski {
2979a6940674SDavid Hildenbrand 	if (!kvm_s390_use_sca_entries()) {
2980a6940674SDavid Hildenbrand 		struct bsca_block *sca = vcpu->kvm->arch.sca;
2981a6940674SDavid Hildenbrand 
2982a6940674SDavid Hildenbrand 		/* we still need the basic sca for the ipte control */
2983a6940674SDavid Hildenbrand 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2984a6940674SDavid Hildenbrand 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
2985f07afa04SDavid Hildenbrand 		return;
2986a6940674SDavid Hildenbrand 	}
2987eaa78f34SDavid Hildenbrand 	read_lock(&vcpu->kvm->arch.sca_lock);
2988eaa78f34SDavid Hildenbrand 	if (vcpu->kvm->arch.use_esca) {
2989eaa78f34SDavid Hildenbrand 		struct esca_block *sca = vcpu->kvm->arch.sca;
29907d43bafcSEugene (jno) Dvurechenski 
2991eaa78f34SDavid Hildenbrand 		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
29927d43bafcSEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
29937d43bafcSEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
29940c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
2995eaa78f34SDavid Hildenbrand 		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
29967d43bafcSEugene (jno) Dvurechenski 	} else {
2997eaa78f34SDavid Hildenbrand 		struct bsca_block *sca = vcpu->kvm->arch.sca;
2998a6e2f683SEugene (jno) Dvurechenski 
2999eaa78f34SDavid Hildenbrand 		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
3000a6e2f683SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
3001a6e2f683SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
3002eaa78f34SDavid Hildenbrand 		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
3003a6e2f683SEugene (jno) Dvurechenski 	}
3004eaa78f34SDavid Hildenbrand 	read_unlock(&vcpu->kvm->arch.sca_lock);
30055e044315SEugene (jno) Dvurechenski }
30065e044315SEugene (jno) Dvurechenski 
30075e044315SEugene (jno) Dvurechenski /* Basic SCA to Extended SCA data copy routines */
30085e044315SEugene (jno) Dvurechenski static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
30095e044315SEugene (jno) Dvurechenski {
30105e044315SEugene (jno) Dvurechenski 	d->sda = s->sda;
30115e044315SEugene (jno) Dvurechenski 	d->sigp_ctrl.c = s->sigp_ctrl.c;
30125e044315SEugene (jno) Dvurechenski 	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
30135e044315SEugene (jno) Dvurechenski }
30145e044315SEugene (jno) Dvurechenski 
30155e044315SEugene (jno) Dvurechenski static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
30165e044315SEugene (jno) Dvurechenski {
30175e044315SEugene (jno) Dvurechenski 	int i;
30185e044315SEugene (jno) Dvurechenski 
30195e044315SEugene (jno) Dvurechenski 	d->ipte_control = s->ipte_control;
30205e044315SEugene (jno) Dvurechenski 	d->mcn[0] = s->mcn;
30215e044315SEugene (jno) Dvurechenski 	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
30225e044315SEugene (jno) Dvurechenski 		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
30235e044315SEugene (jno) Dvurechenski }
30245e044315SEugene (jno) Dvurechenski 
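/*
 * Replace the basic SCA with an extended SCA. All vCPUs are blocked
 * while the entries are copied, every SIE block is re-pointed to the
 * new origin (scaoh/scaol) with ECB2_ESCA set, and the old basic SCA
 * page is freed afterwards. Returns 0 on success or -ENOMEM if the
 * extended SCA cannot be allocated.
 */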
30255e044315SEugene (jno) Dvurechenski static int sca_switch_to_extended(struct kvm *kvm)
30265e044315SEugene (jno) Dvurechenski {
30275e044315SEugene (jno) Dvurechenski 	struct bsca_block *old_sca = kvm->arch.sca;
30285e044315SEugene (jno) Dvurechenski 	struct esca_block *new_sca;
30295e044315SEugene (jno) Dvurechenski 	struct kvm_vcpu *vcpu;
30305e044315SEugene (jno) Dvurechenski 	unsigned int vcpu_idx;
30315e044315SEugene (jno) Dvurechenski 	u32 scaol, scaoh;
30325e044315SEugene (jno) Dvurechenski 
303329b40f10SJanosch Frank 	if (kvm->arch.use_esca)
303429b40f10SJanosch Frank 		return 0;
303529b40f10SJanosch Frank 
3036c4196218SChristian Borntraeger 	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL_ACCOUNT | __GFP_ZERO);
30375e044315SEugene (jno) Dvurechenski 	if (!new_sca)
30385e044315SEugene (jno) Dvurechenski 		return -ENOMEM;
30395e044315SEugene (jno) Dvurechenski 
30405e044315SEugene (jno) Dvurechenski 	scaoh = (u32)((u64)(new_sca) >> 32);
30415e044315SEugene (jno) Dvurechenski 	scaol = (u32)(u64)(new_sca) & ~0x3fU;
30425e044315SEugene (jno) Dvurechenski 
30435e044315SEugene (jno) Dvurechenski 	kvm_s390_vcpu_block_all(kvm);
30445e044315SEugene (jno) Dvurechenski 	write_lock(&kvm->arch.sca_lock);
30455e044315SEugene (jno) Dvurechenski 
30465e044315SEugene (jno) Dvurechenski 	sca_copy_b_to_e(new_sca, old_sca);
30475e044315SEugene (jno) Dvurechenski 
30485e044315SEugene (jno) Dvurechenski 	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
30495e044315SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaoh = scaoh;
30505e044315SEugene (jno) Dvurechenski 		vcpu->arch.sie_block->scaol = scaol;
30510c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
30525e044315SEugene (jno) Dvurechenski 	}
30535e044315SEugene (jno) Dvurechenski 	kvm->arch.sca = new_sca;
30545e044315SEugene (jno) Dvurechenski 	kvm->arch.use_esca = 1;
30555e044315SEugene (jno) Dvurechenski 
30565e044315SEugene (jno) Dvurechenski 	write_unlock(&kvm->arch.sca_lock);
30575e044315SEugene (jno) Dvurechenski 	kvm_s390_vcpu_unblock_all(kvm);
30585e044315SEugene (jno) Dvurechenski 
30595e044315SEugene (jno) Dvurechenski 	free_page((unsigned long)old_sca);
30605e044315SEugene (jno) Dvurechenski 
30618335713aSChristian Borntraeger 	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
30628335713aSChristian Borntraeger 		 old_sca, kvm->arch.sca);
30635e044315SEugene (jno) Dvurechenski 	return 0;
30647d43bafcSEugene (jno) Dvurechenski }
3065a6e2f683SEugene (jno) Dvurechenski 
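/*
 * Check whether a vCPU with the given id fits into the current SCA.
 * Ids beyond the basic SCA slots are only accepted if the machine
 * provides ESCA and 64-bit SCA origins, in which case the VM is
 * switched to the extended SCA on demand.
 */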
3066a6e2f683SEugene (jno) Dvurechenski static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
3067a6e2f683SEugene (jno) Dvurechenski {
30685e044315SEugene (jno) Dvurechenski 	int rc;
30695e044315SEugene (jno) Dvurechenski 
3070a6940674SDavid Hildenbrand 	if (!kvm_s390_use_sca_entries()) {
3071a6940674SDavid Hildenbrand 		if (id < KVM_MAX_VCPUS)
3072a6940674SDavid Hildenbrand 			return true;
3073a6940674SDavid Hildenbrand 		return false;
3074a6940674SDavid Hildenbrand 	}
30755e044315SEugene (jno) Dvurechenski 	if (id < KVM_S390_BSCA_CPU_SLOTS)
30765e044315SEugene (jno) Dvurechenski 		return true;
307776a6dd72SDavid Hildenbrand 	if (!sclp.has_esca || !sclp.has_64bscao)
30785e044315SEugene (jno) Dvurechenski 		return false;
30795e044315SEugene (jno) Dvurechenski 
30805e044315SEugene (jno) Dvurechenski 	mutex_lock(&kvm->lock);
30815e044315SEugene (jno) Dvurechenski 	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
30825e044315SEugene (jno) Dvurechenski 	mutex_unlock(&kvm->lock);
30835e044315SEugene (jno) Dvurechenski 
30845e044315SEugene (jno) Dvurechenski 	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
3085a6e2f683SEugene (jno) Dvurechenski }
3086a6e2f683SEugene (jno) Dvurechenski 
3087db0758b2SDavid Hildenbrand /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3088db0758b2SDavid Hildenbrand static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3089db0758b2SDavid Hildenbrand {
3090db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
30919c23a131SDavid Hildenbrand 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3092db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_start = get_tod_clock_fast();
30939c23a131SDavid Hildenbrand 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3094db0758b2SDavid Hildenbrand }
3095db0758b2SDavid Hildenbrand 
3096db0758b2SDavid Hildenbrand /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3097db0758b2SDavid Hildenbrand static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3098db0758b2SDavid Hildenbrand {
3099db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
31009c23a131SDavid Hildenbrand 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3101db0758b2SDavid Hildenbrand 	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3102db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_start = 0;
31039c23a131SDavid Hildenbrand 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3104db0758b2SDavid Hildenbrand }
3105db0758b2SDavid Hildenbrand 
3106db0758b2SDavid Hildenbrand /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3107db0758b2SDavid Hildenbrand static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3108db0758b2SDavid Hildenbrand {
3109db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
3110db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_enabled = true;
3111db0758b2SDavid Hildenbrand 	__start_cpu_timer_accounting(vcpu);
3112db0758b2SDavid Hildenbrand }
3113db0758b2SDavid Hildenbrand 
3114db0758b2SDavid Hildenbrand /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3115db0758b2SDavid Hildenbrand static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3116db0758b2SDavid Hildenbrand {
3117db0758b2SDavid Hildenbrand 	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
3118db0758b2SDavid Hildenbrand 	__stop_cpu_timer_accounting(vcpu);
3119db0758b2SDavid Hildenbrand 	vcpu->arch.cputm_enabled = false;
3120db0758b2SDavid Hildenbrand }
3121db0758b2SDavid Hildenbrand 
3122db0758b2SDavid Hildenbrand static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3123db0758b2SDavid Hildenbrand {
3124db0758b2SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3125db0758b2SDavid Hildenbrand 	__enable_cpu_timer_accounting(vcpu);
3126db0758b2SDavid Hildenbrand 	preempt_enable();
3127db0758b2SDavid Hildenbrand }
3128db0758b2SDavid Hildenbrand 
3129db0758b2SDavid Hildenbrand static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3130db0758b2SDavid Hildenbrand {
3131db0758b2SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3132db0758b2SDavid Hildenbrand 	__disable_cpu_timer_accounting(vcpu);
3133db0758b2SDavid Hildenbrand 	preempt_enable();
3134db0758b2SDavid Hildenbrand }
3135db0758b2SDavid Hildenbrand 
31364287f247SDavid Hildenbrand /* set the cpu timer - may only be called from the VCPU thread itself */
31374287f247SDavid Hildenbrand void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
31384287f247SDavid Hildenbrand {
3139db0758b2SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
31409c23a131SDavid Hildenbrand 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3141db0758b2SDavid Hildenbrand 	if (vcpu->arch.cputm_enabled)
3142db0758b2SDavid Hildenbrand 		vcpu->arch.cputm_start = get_tod_clock_fast();
31434287f247SDavid Hildenbrand 	vcpu->arch.sie_block->cputm = cputm;
31449c23a131SDavid Hildenbrand 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3145db0758b2SDavid Hildenbrand 	preempt_enable();
31464287f247SDavid Hildenbrand }
31474287f247SDavid Hildenbrand 
3148db0758b2SDavid Hildenbrand /* update and get the cpu timer - can also be called from other VCPU threads */
31494287f247SDavid Hildenbrand __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
31504287f247SDavid Hildenbrand {
31519c23a131SDavid Hildenbrand 	unsigned int seq;
3152db0758b2SDavid Hildenbrand 	__u64 value;
3153db0758b2SDavid Hildenbrand 
3154db0758b2SDavid Hildenbrand 	if (unlikely(!vcpu->arch.cputm_enabled))
31554287f247SDavid Hildenbrand 		return vcpu->arch.sie_block->cputm;
3156db0758b2SDavid Hildenbrand 
31579c23a131SDavid Hildenbrand 	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
31589c23a131SDavid Hildenbrand 	do {
31599c23a131SDavid Hildenbrand 		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
31609c23a131SDavid Hildenbrand 		/*
31619c23a131SDavid Hildenbrand 		 * If the writer ever executed a read in the critical
31629c23a131SDavid Hildenbrand 		 * section, e.g. in irq context, we would have a deadlock.
31639c23a131SDavid Hildenbrand 		 */
31649c23a131SDavid Hildenbrand 		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
3165db0758b2SDavid Hildenbrand 		value = vcpu->arch.sie_block->cputm;
31669c23a131SDavid Hildenbrand 		/* if cputm_start is 0, accounting is being started/stopped */
31679c23a131SDavid Hildenbrand 		if (likely(vcpu->arch.cputm_start))
3168db0758b2SDavid Hildenbrand 			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
31699c23a131SDavid Hildenbrand 	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
31709c23a131SDavid Hildenbrand 	preempt_enable();
3171db0758b2SDavid Hildenbrand 	return value;
31724287f247SDavid Hildenbrand }
31734287f247SDavid Hildenbrand 
3174b0c632dbSHeiko Carstens void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3175b0c632dbSHeiko Carstens {
317737d9df98SDavid Hildenbrand 	gmap_enable(vcpu->arch.enabled_gmap);
3178ef8f4f49SDavid Hildenbrand 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
31795ebda316SDavid Hildenbrand 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3180db0758b2SDavid Hildenbrand 		__start_cpu_timer_accounting(vcpu);
318101a745acSDavid Hildenbrand 	vcpu->cpu = cpu;
3182b0c632dbSHeiko Carstens }
3183b0c632dbSHeiko Carstens 
3184b0c632dbSHeiko Carstens void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
3185b0c632dbSHeiko Carstens {
318601a745acSDavid Hildenbrand 	vcpu->cpu = -1;
31875ebda316SDavid Hildenbrand 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3188db0758b2SDavid Hildenbrand 		__stop_cpu_timer_accounting(vcpu);
31899daecfc6SDavid Hildenbrand 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
319037d9df98SDavid Hildenbrand 	vcpu->arch.enabled_gmap = gmap_get_enabled();
319137d9df98SDavid Hildenbrand 	gmap_disable(vcpu->arch.enabled_gmap);
3193b0c632dbSHeiko Carstens }
3194b0c632dbSHeiko Carstens 
319531928aa5SDominik Dingel void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
319642897d86SMarcelo Tosatti {
319772f25020SJason J. Herne 	mutex_lock(&vcpu->kvm->lock);
3198fdf03650SFan Zhang 	preempt_disable();
319972f25020SJason J. Herne 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
3200d16b52cbSDavid Hildenbrand 	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
3201fdf03650SFan Zhang 	preempt_enable();
320272f25020SJason J. Herne 	mutex_unlock(&vcpu->kvm->lock);
320325508824SDavid Hildenbrand 	if (!kvm_is_ucontrol(vcpu->kvm)) {
3204dafd032aSDominik Dingel 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
3205eaa78f34SDavid Hildenbrand 		sca_add_vcpu(vcpu);
320625508824SDavid Hildenbrand 	}
32076502a34cSDavid Hildenbrand 	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
32086502a34cSDavid Hildenbrand 		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
320937d9df98SDavid Hildenbrand 	/* make vcpu_load load the right gmap on the first trigger */
321037d9df98SDavid Hildenbrand 	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
321142897d86SMarcelo Tosatti }
321242897d86SMarcelo Tosatti 
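/*
 * A PCKMO subfunction is usable by the guest only if it is both part of
 * the guest cpu model and offered by the host facilities.
 */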
32138ec2fa52SChristian Borntraeger static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
32148ec2fa52SChristian Borntraeger {
32158ec2fa52SChristian Borntraeger 	if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
32168ec2fa52SChristian Borntraeger 	    test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
32178ec2fa52SChristian Borntraeger 		return true;
32188ec2fa52SChristian Borntraeger 	return false;
32198ec2fa52SChristian Borntraeger }
32208ec2fa52SChristian Borntraeger 
32218ec2fa52SChristian Borntraeger static bool kvm_has_pckmo_ecc(struct kvm *kvm)
32228ec2fa52SChristian Borntraeger {
32238ec2fa52SChristian Borntraeger 	/* At least one ECC subfunction must be present */
32248ec2fa52SChristian Borntraeger 	return kvm_has_pckmo_subfunc(kvm, 32) ||
32258ec2fa52SChristian Borntraeger 	       kvm_has_pckmo_subfunc(kvm, 33) ||
32268ec2fa52SChristian Borntraeger 	       kvm_has_pckmo_subfunc(kvm, 34) ||
32278ec2fa52SChristian Borntraeger 	       kvm_has_pckmo_subfunc(kvm, 40) ||
32288ec2fa52SChristian Borntraeger 	       kvm_has_pckmo_subfunc(kvm, 41);
32298ec2fa52SChristian Borntraeger 
32318ec2fa52SChristian Borntraeger 
32325102ee87STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
32335102ee87STony Krowiak {
3234e585b24aSTony Krowiak 	/*
3235e585b24aSTony Krowiak 	 * If the AP instructions are not being interpreted and the MSAX3
3236e585b24aSTony Krowiak 	 * facility is not configured for the guest, there is nothing to set up.
3237e585b24aSTony Krowiak 	 */
3238e585b24aSTony Krowiak 	if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
32395102ee87STony Krowiak 		return;
32405102ee87STony Krowiak 
3241e585b24aSTony Krowiak 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
3242a374e892STony Krowiak 	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
324337940fb0STony Krowiak 	vcpu->arch.sie_block->eca &= ~ECA_APIE;
32448ec2fa52SChristian Borntraeger 	vcpu->arch.sie_block->ecd &= ~ECD_ECC;
3245a374e892STony Krowiak 
3246e585b24aSTony Krowiak 	if (vcpu->kvm->arch.crypto.apie)
3247e585b24aSTony Krowiak 		vcpu->arch.sie_block->eca |= ECA_APIE;
3248e585b24aSTony Krowiak 
3249e585b24aSTony Krowiak 	/* Set up protected key support */
32508ec2fa52SChristian Borntraeger 	if (vcpu->kvm->arch.crypto.aes_kw) {
3251a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
32528ec2fa52SChristian Borntraeger 		/* ecc is also wrapped with AES key */
32538ec2fa52SChristian Borntraeger 		if (kvm_has_pckmo_ecc(vcpu->kvm))
32548ec2fa52SChristian Borntraeger 			vcpu->arch.sie_block->ecd |= ECD_ECC;
32558ec2fa52SChristian Borntraeger 	}
32568ec2fa52SChristian Borntraeger 
3257a374e892STony Krowiak 	if (vcpu->kvm->arch.crypto.dea_kw)
3258a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
32595102ee87STony Krowiak }
32605102ee87STony Krowiak 
3261b31605c1SDominik Dingel void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
3262b31605c1SDominik Dingel {
3263b31605c1SDominik Dingel 	free_page(vcpu->arch.sie_block->cbrlo);
3264b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = 0;
3265b31605c1SDominik Dingel }
3266b31605c1SDominik Dingel 
3267b31605c1SDominik Dingel int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
3268b31605c1SDominik Dingel {
3269c4196218SChristian Borntraeger 	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL_ACCOUNT);
3270b31605c1SDominik Dingel 	if (!vcpu->arch.sie_block->cbrlo)
3271b31605c1SDominik Dingel 		return -ENOMEM;
3272b31605c1SDominik Dingel 	return 0;
3273b31605c1SDominik Dingel }
3274b31605c1SDominik Dingel 
327591520f1aSMichael Mueller static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
327691520f1aSMichael Mueller {
327791520f1aSMichael Mueller 	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
327891520f1aSMichael Mueller 
327991520f1aSMichael Mueller 	vcpu->arch.sie_block->ibc = model->ibc;
328080bc79dcSDavid Hildenbrand 	if (test_kvm_facility(vcpu->kvm, 7))
3281c54f0d6aSDavid Hildenbrand 		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
328291520f1aSMichael Mueller }
328391520f1aSMichael Mueller 
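/*
 * Apply the per-vCPU SIE configuration: cpuflags, the cpu model, the
 * ECB/ECA/ECD feature bits that depend on the guest facilities, CMMA,
 * crypto setup and, for protected VMs, creation of the ultravisor CPU.
 */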
3284ff72bb55SSean Christopherson static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
3285ff72bb55SSean Christopherson {
3286b31605c1SDominik Dingel 	int rc = 0;
328729b40f10SJanosch Frank 	u16 uvrc, uvrrc;
3288b31288faSKonstantin Weitz 
32899e6dabefSCornelia Huck 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
32909e6dabefSCornelia Huck 						    CPUSTAT_SM |
3291a4a4f191SGuenther Hutzl 						    CPUSTAT_STOPPED);
3292a4a4f191SGuenther Hutzl 
329353df84f8SGuenther Hutzl 	if (test_kvm_facility(vcpu->kvm, 78))
3294ef8f4f49SDavid Hildenbrand 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
329553df84f8SGuenther Hutzl 	else if (test_kvm_facility(vcpu->kvm, 8))
3296ef8f4f49SDavid Hildenbrand 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
3297a4a4f191SGuenther Hutzl 
329891520f1aSMichael Mueller 	kvm_s390_vcpu_setup_model(vcpu);
329991520f1aSMichael Mueller 
3300bdab09f3SDavid Hildenbrand 	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
3301bdab09f3SDavid Hildenbrand 	if (MACHINE_HAS_ESOP)
33020c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
3303bd50e8ecSDavid Hildenbrand 	if (test_kvm_facility(vcpu->kvm, 9))
33040c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecb |= ECB_SRSI;
3305f597d24eSDavid Hildenbrand 	if (test_kvm_facility(vcpu->kvm, 73))
33060c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecb |= ECB_TE;
33077feb6bb8SMichael Mueller 
3308c9f0a2b8SJanosch Frank 	if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
33090c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
3310cd1836f5SJanosch Frank 	if (test_kvm_facility(vcpu->kvm, 130))
33110c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
33120c9d8683SDavid Hildenbrand 	vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
331348ee7d3aSDavid Hildenbrand 	if (sclp.has_cei)
33140c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= ECA_CEI;
331511ad65b7SDavid Hildenbrand 	if (sclp.has_ib)
33160c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= ECA_IB;
331737c5f6c8SDavid Hildenbrand 	if (sclp.has_siif)
33180c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= ECA_SII;
331937c5f6c8SDavid Hildenbrand 	if (sclp.has_sigpif)
33200c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= ECA_SIGPI;
332118280d8bSMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 129)) {
33220c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= ECA_VX;
33230c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
332413211ea7SEric Farman 	}
33258fa1696eSCollin L. Walling 	if (test_kvm_facility(vcpu->kvm, 139))
33268fa1696eSCollin L. Walling 		vcpu->arch.sie_block->ecd |= ECD_MEF;
3327a3da7b4aSChristian Borntraeger 	if (test_kvm_facility(vcpu->kvm, 156))
3328a3da7b4aSChristian Borntraeger 		vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
3329d7c5cb01SMichael Mueller 	if (vcpu->arch.sie_block->gd) {
3330d7c5cb01SMichael Mueller 		vcpu->arch.sie_block->eca |= ECA_AIV;
3331d7c5cb01SMichael Mueller 		VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3332d7c5cb01SMichael Mueller 			   vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3333d7c5cb01SMichael Mueller 	}
33344e0b1ab7SFan Zhang 	vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
33354e0b1ab7SFan Zhang 					| SDNXC;
3336c6e5f166SFan Zhang 	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
3337730cd632SFarhan Ali 
3338730cd632SFarhan Ali 	if (sclp.has_kss)
3339ef8f4f49SDavid Hildenbrand 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
3340730cd632SFarhan Ali 	else
3341492d8642SThomas Huth 		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
33425a5e6536SMatthew Rosato 
3343e6db1d61SDominik Dingel 	if (vcpu->kvm->arch.use_cmma) {
3344b31605c1SDominik Dingel 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
3345b31605c1SDominik Dingel 		if (rc)
3346b31605c1SDominik Dingel 			return rc;
3347b31288faSKonstantin Weitz 	}
33480ac96cafSDavid Hildenbrand 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3349ca872302SChristian Borntraeger 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
33509d8d5786SMichael Mueller 
335167d49d52SCollin Walling 	vcpu->arch.sie_block->hpid = HPID_KVM;
335267d49d52SCollin Walling 
33535102ee87STony Krowiak 	kvm_s390_vcpu_crypto_setup(vcpu);
33545102ee87STony Krowiak 
335529b40f10SJanosch Frank 	mutex_lock(&vcpu->kvm->lock);
335629b40f10SJanosch Frank 	if (kvm_s390_pv_is_protected(vcpu->kvm)) {
335729b40f10SJanosch Frank 		rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
335829b40f10SJanosch Frank 		if (rc)
335929b40f10SJanosch Frank 			kvm_s390_vcpu_unsetup_cmma(vcpu);
336029b40f10SJanosch Frank 	}
336129b40f10SJanosch Frank 	mutex_unlock(&vcpu->kvm->lock);
336229b40f10SJanosch Frank 
3363b31605c1SDominik Dingel 	return rc;
3364b0c632dbSHeiko Carstens }
3365b0c632dbSHeiko Carstens 
3366897cc38eSSean Christopherson int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
3367897cc38eSSean Christopherson {
3368897cc38eSSean Christopherson 	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
3369897cc38eSSean Christopherson 		return -EINVAL;
3370897cc38eSSean Christopherson 	return 0;
3371897cc38eSSean Christopherson }
3372897cc38eSSean Christopherson 
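/*
 * Allocate and initialize the SIE control block for a new vCPU: wire up
 * the ITDB and GISA, set up the synced register view for user space and,
 * for ucontrol VMs, create the per-vCPU gmap before the common setup runs.
 */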
3373e529ef66SSean Christopherson int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
3374b0c632dbSHeiko Carstens {
33757feb6bb8SMichael Mueller 	struct sie_page *sie_page;
3376897cc38eSSean Christopherson 	int rc;
33774d47555aSCarsten Otte 
3378da72ca4dSQingFeng Hao 	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
3379c4196218SChristian Borntraeger 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT);
33807feb6bb8SMichael Mueller 	if (!sie_page)
3381e529ef66SSean Christopherson 		return -ENOMEM;
3382b0c632dbSHeiko Carstens 
33837feb6bb8SMichael Mueller 	vcpu->arch.sie_block = &sie_page->sie_block;
33847feb6bb8SMichael Mueller 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
33857feb6bb8SMichael Mueller 
3386efed1104SDavid Hildenbrand 	/* the real guest size will always be smaller than msl */
3387efed1104SDavid Hildenbrand 	vcpu->arch.sie_block->mso = 0;
3388efed1104SDavid Hildenbrand 	vcpu->arch.sie_block->msl = sclp.hamax;
3389efed1104SDavid Hildenbrand 
3390e529ef66SSean Christopherson 	vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
3391ba5c1e9bSCarsten Otte 	spin_lock_init(&vcpu->arch.local_int.lock);
3392e529ef66SSean Christopherson 	vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin;
33934b9f9525SMichael Mueller 	if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
33944b9f9525SMichael Mueller 		vcpu->arch.sie_block->gd |= GISA_FORMAT1;
33959c23a131SDavid Hildenbrand 	seqcount_init(&vcpu->arch.cputm_seqcount);
3396ba5c1e9bSCarsten Otte 
3397321f8ee5SSean Christopherson 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3398321f8ee5SSean Christopherson 	kvm_clear_async_pf_completion_queue(vcpu);
3399321f8ee5SSean Christopherson 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
3400321f8ee5SSean Christopherson 				    KVM_SYNC_GPRS |
3401321f8ee5SSean Christopherson 				    KVM_SYNC_ACRS |
3402321f8ee5SSean Christopherson 				    KVM_SYNC_CRS |
3403321f8ee5SSean Christopherson 				    KVM_SYNC_ARCH0 |
340423a60f83SCollin Walling 				    KVM_SYNC_PFAULT |
340523a60f83SCollin Walling 				    KVM_SYNC_DIAG318;
3406321f8ee5SSean Christopherson 	kvm_s390_set_prefix(vcpu, 0);
3407321f8ee5SSean Christopherson 	if (test_kvm_facility(vcpu->kvm, 64))
3408321f8ee5SSean Christopherson 		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
3409321f8ee5SSean Christopherson 	if (test_kvm_facility(vcpu->kvm, 82))
3410321f8ee5SSean Christopherson 		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
3411321f8ee5SSean Christopherson 	if (test_kvm_facility(vcpu->kvm, 133))
3412321f8ee5SSean Christopherson 		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
3413321f8ee5SSean Christopherson 	if (test_kvm_facility(vcpu->kvm, 156))
3414321f8ee5SSean Christopherson 		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
3415321f8ee5SSean Christopherson 	/* fprs can be synchronized via vrs, even if the guest has no vx. With
3416321f8ee5SSean Christopherson 	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
3417321f8ee5SSean Christopherson 	 */
3418321f8ee5SSean Christopherson 	if (MACHINE_HAS_VX)
3419321f8ee5SSean Christopherson 		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
3420321f8ee5SSean Christopherson 	else
3421321f8ee5SSean Christopherson 		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
3422321f8ee5SSean Christopherson 
3423321f8ee5SSean Christopherson 	if (kvm_is_ucontrol(vcpu->kvm)) {
3424321f8ee5SSean Christopherson 		rc = __kvm_ucontrol_vcpu_init(vcpu);
3425321f8ee5SSean Christopherson 		if (rc)
3426a2017f17SSean Christopherson 			goto out_free_sie_block;
3427321f8ee5SSean Christopherson 	}
3428321f8ee5SSean Christopherson 
3429e529ef66SSean Christopherson 	VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
3430e529ef66SSean Christopherson 		 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3431e529ef66SSean Christopherson 	trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3432b0c632dbSHeiko Carstens 
3433ff72bb55SSean Christopherson 	rc = kvm_s390_vcpu_setup(vcpu);
3434ff72bb55SSean Christopherson 	if (rc)
3435ff72bb55SSean Christopherson 		goto out_ucontrol_uninit;
3436e529ef66SSean Christopherson 	return 0;
3437e529ef66SSean Christopherson 
3438ff72bb55SSean Christopherson out_ucontrol_uninit:
3439ff72bb55SSean Christopherson 	if (kvm_is_ucontrol(vcpu->kvm))
3440ff72bb55SSean Christopherson 		gmap_remove(vcpu->arch.gmap);
34417b06bf2fSWei Yongjun out_free_sie_block:
34427b06bf2fSWei Yongjun 	free_page((unsigned long)(vcpu->arch.sie_block));
3443e529ef66SSean Christopherson 	return rc;
3444b0c632dbSHeiko Carstens }
3445b0c632dbSHeiko Carstens 
3446b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3447b0c632dbSHeiko Carstens {
34489a022067SDavid Hildenbrand 	return kvm_s390_vcpu_has_irq(vcpu, 0);
3449b0c632dbSHeiko Carstens }
3450b0c632dbSHeiko Carstens 
3451199b5763SLongpeng(Mike) bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3452199b5763SLongpeng(Mike) {
34530546c63dSLongpeng(Mike) 	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
3454199b5763SLongpeng(Mike) }
3455199b5763SLongpeng(Mike) 
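/*
 * Blocking a vCPU sets PROG_BLOCK_SIE in the SIE block's prog20 field and
 * kicks the vCPU out of SIE, so it cannot re-enter guest context until
 * kvm_s390_vcpu_unblock() clears the bit again.
 */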
345627406cd5SChristian Borntraeger void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
345749b99e1eSChristian Borntraeger {
3458805de8f4SPeter Zijlstra 	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
345961a6df54SDavid Hildenbrand 	exit_sie(vcpu);
346049b99e1eSChristian Borntraeger }
346149b99e1eSChristian Borntraeger 
346227406cd5SChristian Borntraeger void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
346349b99e1eSChristian Borntraeger {
3464805de8f4SPeter Zijlstra 	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
346549b99e1eSChristian Borntraeger }
346649b99e1eSChristian Borntraeger 
34678e236546SChristian Borntraeger static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
34688e236546SChristian Borntraeger {
3469805de8f4SPeter Zijlstra 	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
347061a6df54SDavid Hildenbrand 	exit_sie(vcpu);
34718e236546SChristian Borntraeger }
34728e236546SChristian Borntraeger 
34739ea59728SDavid Hildenbrand bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
34749ea59728SDavid Hildenbrand {
34759ea59728SDavid Hildenbrand 	return atomic_read(&vcpu->arch.sie_block->prog20) &
34769ea59728SDavid Hildenbrand 	       (PROG_BLOCK_SIE | PROG_REQUEST);
34779ea59728SDavid Hildenbrand }
34789ea59728SDavid Hildenbrand 
34798e236546SChristian Borntraeger static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
34808e236546SChristian Borntraeger {
34819bf9fde2SJason J. Herne 	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
34828e236546SChristian Borntraeger }
34838e236546SChristian Borntraeger 
348449b99e1eSChristian Borntraeger /*
34859ea59728SDavid Hildenbrand  * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
348649b99e1eSChristian Borntraeger  * If the CPU is not running (e.g. waiting as idle), the function will
348749b99e1eSChristian Borntraeger  * return immediately. */
348849b99e1eSChristian Borntraeger void exit_sie(struct kvm_vcpu *vcpu)
348949b99e1eSChristian Borntraeger {
3490ef8f4f49SDavid Hildenbrand 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
34919ea59728SDavid Hildenbrand 	kvm_s390_vsie_kick(vcpu);
349249b99e1eSChristian Borntraeger 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
349349b99e1eSChristian Borntraeger 		cpu_relax();
349449b99e1eSChristian Borntraeger }
349549b99e1eSChristian Borntraeger 
34968e236546SChristian Borntraeger /* Kick a guest cpu out of SIE to process a request synchronously */
34978e236546SChristian Borntraeger void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
349849b99e1eSChristian Borntraeger {
34998e236546SChristian Borntraeger 	kvm_make_request(req, vcpu);
35008e236546SChristian Borntraeger 	kvm_s390_vcpu_request(vcpu);
350149b99e1eSChristian Borntraeger }
350249b99e1eSChristian Borntraeger 
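/*
 * gmap invalidation callback: when the mapping of a guest prefix page
 * changes, request an MMU reload for every vCPU whose prefix intersects
 * the invalidated range so the prefix is re-mapped and re-protected
 * before the next SIE entry. Shadow gmaps are ignored here.
 */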
3503414d3b07SMartin Schwidefsky static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
3504414d3b07SMartin Schwidefsky 			      unsigned long end)
35052c70fe44SChristian Borntraeger {
35062c70fe44SChristian Borntraeger 	struct kvm *kvm = gmap->private;
35072c70fe44SChristian Borntraeger 	struct kvm_vcpu *vcpu;
3508414d3b07SMartin Schwidefsky 	unsigned long prefix;
3509414d3b07SMartin Schwidefsky 	int i;
35102c70fe44SChristian Borntraeger 
351165d0b0d4SDavid Hildenbrand 	if (gmap_is_shadow(gmap))
351265d0b0d4SDavid Hildenbrand 		return;
3513414d3b07SMartin Schwidefsky 	if (start >= 1UL << 31)
3514414d3b07SMartin Schwidefsky 		/* We are only interested in prefix pages */
3515414d3b07SMartin Schwidefsky 		return;
35162c70fe44SChristian Borntraeger 	kvm_for_each_vcpu(i, vcpu, kvm) {
35172c70fe44SChristian Borntraeger 		/* match against both prefix pages */
3518414d3b07SMartin Schwidefsky 		prefix = kvm_s390_get_prefix(vcpu);
3519414d3b07SMartin Schwidefsky 		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
3520414d3b07SMartin Schwidefsky 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
3521414d3b07SMartin Schwidefsky 				   start, end);
35228e236546SChristian Borntraeger 			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
35232c70fe44SChristian Borntraeger 		}
35242c70fe44SChristian Borntraeger 	}
35252c70fe44SChristian Borntraeger }
35262c70fe44SChristian Borntraeger 
35278b905d28SChristian Borntraeger bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
35288b905d28SChristian Borntraeger {
35298b905d28SChristian Borntraeger 	/* do not poll with more than halt_poll_max_steal percent of steal time */
35308b905d28SChristian Borntraeger 	if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
35318b905d28SChristian Borntraeger 	    halt_poll_max_steal) {
35328b905d28SChristian Borntraeger 		vcpu->stat.halt_no_poll_steal++;
35338b905d28SChristian Borntraeger 		return true;
35348b905d28SChristian Borntraeger 	}
35358b905d28SChristian Borntraeger 	return false;
35368b905d28SChristian Borntraeger }
35378b905d28SChristian Borntraeger 
3538b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
3539b6d33834SChristoffer Dall {
3540b6d33834SChristoffer Dall 	/* kvm common code refers to this, but never calls it */
3541b6d33834SChristoffer Dall 	BUG();
3542b6d33834SChristoffer Dall 	return 0;
3543b6d33834SChristoffer Dall }
3544b6d33834SChristoffer Dall 
354514eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
354614eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
354714eebd91SCarsten Otte {
354814eebd91SCarsten Otte 	int r = -EINVAL;
354914eebd91SCarsten Otte 
355014eebd91SCarsten Otte 	switch (reg->id) {
355129b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
355229b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->todpr,
355329b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
355429b7c71bSCarsten Otte 		break;
355529b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
355629b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->epoch,
355729b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
355829b7c71bSCarsten Otte 		break;
355946a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
35604287f247SDavid Hildenbrand 		r = put_user(kvm_s390_get_cpu_timer(vcpu),
356146a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
356246a6dd1cSJason J. herne 		break;
356346a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
356446a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->ckc,
356546a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
356646a6dd1cSJason J. herne 		break;
3567536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
3568536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_token,
3569536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
3570536336c2SDominik Dingel 		break;
3571536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
3572536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_compare,
3573536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
3574536336c2SDominik Dingel 		break;
3575536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
3576536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_select,
3577536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
3578536336c2SDominik Dingel 		break;
3579672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
3580672550fbSChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->pp,
3581672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
3582672550fbSChristian Borntraeger 		break;
3583afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
3584afa45ff5SChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->gbea,
3585afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
3586afa45ff5SChristian Borntraeger 		break;
358714eebd91SCarsten Otte 	default:
358814eebd91SCarsten Otte 		break;
358914eebd91SCarsten Otte 	}
359014eebd91SCarsten Otte 
359114eebd91SCarsten Otte 	return r;
359214eebd91SCarsten Otte }
359314eebd91SCarsten Otte 
359414eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
359514eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
359614eebd91SCarsten Otte {
359714eebd91SCarsten Otte 	int r = -EINVAL;
35984287f247SDavid Hildenbrand 	__u64 val;
359914eebd91SCarsten Otte 
360014eebd91SCarsten Otte 	switch (reg->id) {
360129b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
360229b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->todpr,
360329b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
360429b7c71bSCarsten Otte 		break;
360529b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
360629b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->epoch,
360729b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
360829b7c71bSCarsten Otte 		break;
360946a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
36104287f247SDavid Hildenbrand 		r = get_user(val, (u64 __user *)reg->addr);
36114287f247SDavid Hildenbrand 		if (!r)
36124287f247SDavid Hildenbrand 			kvm_s390_set_cpu_timer(vcpu, val);
361346a6dd1cSJason J. herne 		break;
361446a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
361546a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->ckc,
361646a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
361746a6dd1cSJason J. herne 		break;
3618536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
3619536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_token,
3620536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
36219fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
36229fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
3623536336c2SDominik Dingel 		break;
3624536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
3625536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_compare,
3626536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
3627536336c2SDominik Dingel 		break;
3628536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
3629536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_select,
3630536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
3631536336c2SDominik Dingel 		break;
3632672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
3633672550fbSChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->pp,
3634672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
3635672550fbSChristian Borntraeger 		break;
3636afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
3637afa45ff5SChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->gbea,
3638afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
3639afa45ff5SChristian Borntraeger 		break;
364014eebd91SCarsten Otte 	default:
364114eebd91SCarsten Otte 		break;
364214eebd91SCarsten Otte 	}
364314eebd91SCarsten Otte 
364414eebd91SCarsten Otte 	return r;
364514eebd91SCarsten Otte }
3646b6d33834SChristoffer Dall 
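/*
 * The three architected vCPU resets build on each other: the normal
 * reset below is contained in the initial reset, which in turn is
 * contained in the clear reset.
 */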
36477de3f142SJanosch Frank static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
3648b0c632dbSHeiko Carstens {
36497de3f142SJanosch Frank 	vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
36507de3f142SJanosch Frank 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
36517de3f142SJanosch Frank 	memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
36527de3f142SJanosch Frank 
36537de3f142SJanosch Frank 	kvm_clear_async_pf_completion_queue(vcpu);
36547de3f142SJanosch Frank 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
36557de3f142SJanosch Frank 		kvm_s390_vcpu_stop(vcpu);
36567de3f142SJanosch Frank 	kvm_s390_clear_local_irqs(vcpu);
36577de3f142SJanosch Frank }
36587de3f142SJanosch Frank 
36597de3f142SJanosch Frank static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
36607de3f142SJanosch Frank {
36617de3f142SJanosch Frank 	/* Initial reset is a superset of the normal reset */
36627de3f142SJanosch Frank 	kvm_arch_vcpu_ioctl_normal_reset(vcpu);
36637de3f142SJanosch Frank 
3664e93fc7b4SChristian Borntraeger 	/*
3665e93fc7b4SChristian Borntraeger 	 * This equals initial cpu reset in pop, but we don't switch to ESA.
3666e93fc7b4SChristian Borntraeger 	 * We do not only reset the internal data, but also ...
3667e93fc7b4SChristian Borntraeger 	 */
36687de3f142SJanosch Frank 	vcpu->arch.sie_block->gpsw.mask = 0;
36697de3f142SJanosch Frank 	vcpu->arch.sie_block->gpsw.addr = 0;
36707de3f142SJanosch Frank 	kvm_s390_set_prefix(vcpu, 0);
36717de3f142SJanosch Frank 	kvm_s390_set_cpu_timer(vcpu, 0);
36727de3f142SJanosch Frank 	vcpu->arch.sie_block->ckc = 0;
36737de3f142SJanosch Frank 	memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
36747de3f142SJanosch Frank 	vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
36757de3f142SJanosch Frank 	vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
3676e93fc7b4SChristian Borntraeger 
3677e93fc7b4SChristian Borntraeger 	/* ... the data in sync regs */
3678e93fc7b4SChristian Borntraeger 	memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
3679e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.ckc = 0;
3680e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
3681e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
3682e93fc7b4SChristian Borntraeger 	vcpu->run->psw_addr = 0;
3683e93fc7b4SChristian Borntraeger 	vcpu->run->psw_mask = 0;
3684e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.todpr = 0;
3685e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.cputm = 0;
3686e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.ckc = 0;
3687e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.pp = 0;
3688e93fc7b4SChristian Borntraeger 	vcpu->run->s.regs.gbea = 1;
36897de3f142SJanosch Frank 	vcpu->run->s.regs.fpc = 0;
36900f303504SJanosch Frank 	/*
36910f303504SJanosch Frank 	 * Do not reset these registers in the protected case, as some of
36920f303504SJanosch Frank 	 * them are overlaid and they are not accessible in this case
36930f303504SJanosch Frank 	 * anyway.
36940f303504SJanosch Frank 	 */
36950f303504SJanosch Frank 	if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
36967de3f142SJanosch Frank 		vcpu->arch.sie_block->gbea = 1;
36977de3f142SJanosch Frank 		vcpu->arch.sie_block->pp = 0;
36987de3f142SJanosch Frank 		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
36990f303504SJanosch Frank 		vcpu->arch.sie_block->todpr = 0;
37000f303504SJanosch Frank 	}
37017de3f142SJanosch Frank }
37027de3f142SJanosch Frank 
37037de3f142SJanosch Frank static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
37047de3f142SJanosch Frank {
37057de3f142SJanosch Frank 	struct kvm_sync_regs *regs = &vcpu->run->s.regs;
37067de3f142SJanosch Frank 
37077de3f142SJanosch Frank 	/* Clear reset is a superset of the initial reset */
37087de3f142SJanosch Frank 	kvm_arch_vcpu_ioctl_initial_reset(vcpu);
37097de3f142SJanosch Frank 
37107de3f142SJanosch Frank 	memset(&regs->gprs, 0, sizeof(regs->gprs));
37117de3f142SJanosch Frank 	memset(&regs->vrs, 0, sizeof(regs->vrs));
37127de3f142SJanosch Frank 	memset(&regs->acrs, 0, sizeof(regs->acrs));
37137de3f142SJanosch Frank 	memset(&regs->gscb, 0, sizeof(regs->gscb));
37147de3f142SJanosch Frank 
37157de3f142SJanosch Frank 	regs->etoken = 0;
37167de3f142SJanosch Frank 	regs->etoken_extension = 0;
3717b0c632dbSHeiko Carstens }
3718b0c632dbSHeiko Carstens 
3719b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3720b0c632dbSHeiko Carstens {
3721875656feSChristoffer Dall 	vcpu_load(vcpu);
37225a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
3723875656feSChristoffer Dall 	vcpu_put(vcpu);
3724b0c632dbSHeiko Carstens 	return 0;
3725b0c632dbSHeiko Carstens }
3726b0c632dbSHeiko Carstens 
3727b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3728b0c632dbSHeiko Carstens {
37291fc9b76bSChristoffer Dall 	vcpu_load(vcpu);
37305a32c1afSChristian Borntraeger 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
37311fc9b76bSChristoffer Dall 	vcpu_put(vcpu);
3732b0c632dbSHeiko Carstens 	return 0;
3733b0c632dbSHeiko Carstens }
3734b0c632dbSHeiko Carstens 
3735b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
3736b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
3737b0c632dbSHeiko Carstens {
3738b4ef9d4eSChristoffer Dall 	vcpu_load(vcpu);
3739b4ef9d4eSChristoffer Dall 
374059674c1aSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
3741b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
3742b4ef9d4eSChristoffer Dall 
3743b4ef9d4eSChristoffer Dall 	vcpu_put(vcpu);
3744b0c632dbSHeiko Carstens 	return 0;
3745b0c632dbSHeiko Carstens }
3746b0c632dbSHeiko Carstens 
3747b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3748b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
3749b0c632dbSHeiko Carstens {
3750bcdec41cSChristoffer Dall 	vcpu_load(vcpu);
3751bcdec41cSChristoffer Dall 
375259674c1aSChristian Borntraeger 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
3753b0c632dbSHeiko Carstens 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
3754bcdec41cSChristoffer Dall 
3755bcdec41cSChristoffer Dall 	vcpu_put(vcpu);
3756b0c632dbSHeiko Carstens 	return 0;
3757b0c632dbSHeiko Carstens }
3758b0c632dbSHeiko Carstens 
3759b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3760b0c632dbSHeiko Carstens {
37616a96bc7fSChristoffer Dall 	int ret = 0;
37626a96bc7fSChristoffer Dall 
37636a96bc7fSChristoffer Dall 	vcpu_load(vcpu);
37646a96bc7fSChristoffer Dall 
37656a96bc7fSChristoffer Dall 	if (test_fp_ctl(fpu->fpc)) {
37666a96bc7fSChristoffer Dall 		ret = -EINVAL;
37676a96bc7fSChristoffer Dall 		goto out;
37686a96bc7fSChristoffer Dall 	}
3769e1788bb9SChristian Borntraeger 	vcpu->run->s.regs.fpc = fpu->fpc;
37709abc2a08SDavid Hildenbrand 	if (MACHINE_HAS_VX)
3771a7d4b8f2SDavid Hildenbrand 		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
3772a7d4b8f2SDavid Hildenbrand 				 (freg_t *) fpu->fprs);
37739abc2a08SDavid Hildenbrand 	else
3774a7d4b8f2SDavid Hildenbrand 		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
37756a96bc7fSChristoffer Dall 
37766a96bc7fSChristoffer Dall out:
37776a96bc7fSChristoffer Dall 	vcpu_put(vcpu);
37786a96bc7fSChristoffer Dall 	return ret;
3779b0c632dbSHeiko Carstens }
3780b0c632dbSHeiko Carstens 
3781b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3782b0c632dbSHeiko Carstens {
37831393123eSChristoffer Dall 	vcpu_load(vcpu);
37841393123eSChristoffer Dall 
37859abc2a08SDavid Hildenbrand 	/* make sure we have the latest values */
37869abc2a08SDavid Hildenbrand 	save_fpu_regs();
37879abc2a08SDavid Hildenbrand 	if (MACHINE_HAS_VX)
3788a7d4b8f2SDavid Hildenbrand 		convert_vx_to_fp((freg_t *) fpu->fprs,
3789a7d4b8f2SDavid Hildenbrand 				 (__vector128 *) vcpu->run->s.regs.vrs);
37909abc2a08SDavid Hildenbrand 	else
3791a7d4b8f2SDavid Hildenbrand 		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
3792e1788bb9SChristian Borntraeger 	fpu->fpc = vcpu->run->s.regs.fpc;
37931393123eSChristoffer Dall 
37941393123eSChristoffer Dall 	vcpu_put(vcpu);
3795b0c632dbSHeiko Carstens 	return 0;
3796b0c632dbSHeiko Carstens }
3797b0c632dbSHeiko Carstens 
3798b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
3799b0c632dbSHeiko Carstens {
3800b0c632dbSHeiko Carstens 	int rc = 0;
3801b0c632dbSHeiko Carstens 
38027a42fdc2SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
3803b0c632dbSHeiko Carstens 		rc = -EBUSY;
3804d7b0b5ebSCarsten Otte 	else {
3805d7b0b5ebSCarsten Otte 		vcpu->run->psw_mask = psw.mask;
3806d7b0b5ebSCarsten Otte 		vcpu->run->psw_addr = psw.addr;
3807d7b0b5ebSCarsten Otte 	}
3808b0c632dbSHeiko Carstens 	return rc;
3809b0c632dbSHeiko Carstens }
3810b0c632dbSHeiko Carstens 
3811b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
3812b0c632dbSHeiko Carstens 				  struct kvm_translation *tr)
3813b0c632dbSHeiko Carstens {
3814b0c632dbSHeiko Carstens 	return -EINVAL; /* not implemented yet */
3815b0c632dbSHeiko Carstens }
3816b0c632dbSHeiko Carstens 
381727291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
381827291e21SDavid Hildenbrand 			      KVM_GUESTDBG_USE_HW_BP | \
381927291e21SDavid Hildenbrand 			      KVM_GUESTDBG_ENABLE)
382027291e21SDavid Hildenbrand 
3821d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
3822d0bfb940SJan Kiszka 					struct kvm_guest_debug *dbg)
3823b0c632dbSHeiko Carstens {
382427291e21SDavid Hildenbrand 	int rc = 0;
382527291e21SDavid Hildenbrand 
382666b56562SChristoffer Dall 	vcpu_load(vcpu);
382766b56562SChristoffer Dall 
382827291e21SDavid Hildenbrand 	vcpu->guest_debug = 0;
382927291e21SDavid Hildenbrand 	kvm_s390_clear_bp_data(vcpu);
383027291e21SDavid Hildenbrand 
383166b56562SChristoffer Dall 	if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
383266b56562SChristoffer Dall 		rc = -EINVAL;
383366b56562SChristoffer Dall 		goto out;
383466b56562SChristoffer Dall 	}
383566b56562SChristoffer Dall 	if (!sclp.has_gpere) {
383666b56562SChristoffer Dall 		rc = -EINVAL;
383766b56562SChristoffer Dall 		goto out;
383866b56562SChristoffer Dall 	}
383927291e21SDavid Hildenbrand 
384027291e21SDavid Hildenbrand 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
384127291e21SDavid Hildenbrand 		vcpu->guest_debug = dbg->control;
384227291e21SDavid Hildenbrand 		/* enforce guest PER */
3843ef8f4f49SDavid Hildenbrand 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
384427291e21SDavid Hildenbrand 
384527291e21SDavid Hildenbrand 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
384627291e21SDavid Hildenbrand 			rc = kvm_s390_import_bp_data(vcpu, dbg);
384727291e21SDavid Hildenbrand 	} else {
38489daecfc6SDavid Hildenbrand 		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
384927291e21SDavid Hildenbrand 		vcpu->arch.guestdbg.last_bp = 0;
385027291e21SDavid Hildenbrand 	}
385127291e21SDavid Hildenbrand 
385227291e21SDavid Hildenbrand 	if (rc) {
385327291e21SDavid Hildenbrand 		vcpu->guest_debug = 0;
385427291e21SDavid Hildenbrand 		kvm_s390_clear_bp_data(vcpu);
38559daecfc6SDavid Hildenbrand 		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
385627291e21SDavid Hildenbrand 	}
385727291e21SDavid Hildenbrand 
385866b56562SChristoffer Dall out:
385966b56562SChristoffer Dall 	vcpu_put(vcpu);
386027291e21SDavid Hildenbrand 	return rc;
3861b0c632dbSHeiko Carstens }
3862b0c632dbSHeiko Carstens 
386362d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
386462d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
386562d9f0dbSMarcelo Tosatti {
3866fd232561SChristoffer Dall 	int ret;
3867fd232561SChristoffer Dall 
3868fd232561SChristoffer Dall 	vcpu_load(vcpu);
3869fd232561SChristoffer Dall 
38706352e4d2SDavid Hildenbrand 	/* CHECK_STOP and LOAD are not supported yet */
3871fd232561SChristoffer Dall 	ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
38726352e4d2SDavid Hildenbrand 				      KVM_MP_STATE_OPERATING;
3873fd232561SChristoffer Dall 
3874fd232561SChristoffer Dall 	vcpu_put(vcpu);
3875fd232561SChristoffer Dall 	return ret;
387662d9f0dbSMarcelo Tosatti }
387762d9f0dbSMarcelo Tosatti 
387862d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
387962d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
388062d9f0dbSMarcelo Tosatti {
38816352e4d2SDavid Hildenbrand 	int rc = 0;
38826352e4d2SDavid Hildenbrand 
3883e83dff5eSChristoffer Dall 	vcpu_load(vcpu);
3884e83dff5eSChristoffer Dall 
38856352e4d2SDavid Hildenbrand 	/* user space knows about this interface - let it control the state */
38866352e4d2SDavid Hildenbrand 	vcpu->kvm->arch.user_cpu_state_ctrl = 1;
38876352e4d2SDavid Hildenbrand 
38886352e4d2SDavid Hildenbrand 	switch (mp_state->mp_state) {
38896352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_STOPPED:
3890fe28c786SJanosch Frank 		rc = kvm_s390_vcpu_stop(vcpu);
38916352e4d2SDavid Hildenbrand 		break;
38926352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_OPERATING:
3893fe28c786SJanosch Frank 		rc = kvm_s390_vcpu_start(vcpu);
38946352e4d2SDavid Hildenbrand 		break;
38956352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_LOAD:
38967c36a3fcSJanosch Frank 		if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
38977c36a3fcSJanosch Frank 			rc = -ENXIO;
38987c36a3fcSJanosch Frank 			break;
38997c36a3fcSJanosch Frank 		}
39007c36a3fcSJanosch Frank 		rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
39017c36a3fcSJanosch Frank 		break;
39026352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_CHECK_STOP:
39033b684a42SJoe Perches 		fallthrough;	/* CHECK_STOP is not supported yet */
39046352e4d2SDavid Hildenbrand 	default:
39056352e4d2SDavid Hildenbrand 		rc = -ENXIO;
39066352e4d2SDavid Hildenbrand 	}
39076352e4d2SDavid Hildenbrand 
3908e83dff5eSChristoffer Dall 	vcpu_put(vcpu);
39096352e4d2SDavid Hildenbrand 	return rc;
391062d9f0dbSMarcelo Tosatti }
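/*
 * A minimal sketch of the matching userspace call, again assuming an open
 * vcpu_fd; note that issuing KVM_SET_MP_STATE at all switches the VM to
 * user-controlled CPU state (user_cpu_state_ctrl above):
 *
 *	struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_STOPPED };
 *	if (ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp) < 0)
 *		perror("KVM_SET_MP_STATE");
 *
 * KVM_MP_STATE_LOAD is only accepted for protected vcpus here, and
 * KVM_MP_STATE_CHECK_STOP is rejected with -ENXIO.
 */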
391162d9f0dbSMarcelo Tosatti 
39128ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu)
39138ad35755SDavid Hildenbrand {
39148d5fb0dcSDavid Hildenbrand 	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
39158ad35755SDavid Hildenbrand }
39168ad35755SDavid Hildenbrand 
39172c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
39182c70fe44SChristian Borntraeger {
39198ad35755SDavid Hildenbrand retry:
39208e236546SChristian Borntraeger 	kvm_s390_vcpu_request_handled(vcpu);
39212fa6e1e1SRadim Krčmář 	if (!kvm_request_pending(vcpu))
3922586b7ccdSChristian Borntraeger 		return 0;
39232c70fe44SChristian Borntraeger 	/*
39242c70fe44SChristian Borntraeger 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
3925b2d73b2aSMartin Schwidefsky 	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
39262c70fe44SChristian Borntraeger 	 * This ensures that the ipte instruction for this request has
39272c70fe44SChristian Borntraeger 	 * already finished. We might race against a second unmapper that
39282c70fe44SChristian Borntraeger 	 * wants to set the blocking bit. Let's just retry the request loop.
39292c70fe44SChristian Borntraeger 	 */
39308ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
39312c70fe44SChristian Borntraeger 		int rc;
3932b2d73b2aSMartin Schwidefsky 		rc = gmap_mprotect_notify(vcpu->arch.gmap,
3933fda902cbSMichael Mueller 					  kvm_s390_get_prefix(vcpu),
3934b2d73b2aSMartin Schwidefsky 					  PAGE_SIZE * 2, PROT_WRITE);
3935aca411a4SJulius Niedworok 		if (rc) {
3936aca411a4SJulius Niedworok 			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
39372c70fe44SChristian Borntraeger 			return rc;
3938aca411a4SJulius Niedworok 		}
39398ad35755SDavid Hildenbrand 		goto retry;
39402c70fe44SChristian Borntraeger 	}
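	/*
	 * Worked example (addresses are illustrative): with the guest prefix
	 * at 0x20000, the call above re-protects the two prefix pages
	 * 0x20000..0x21fff, so the ipte notifier fires again on the next
	 * invalidation of that range.
	 */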
39418ad35755SDavid Hildenbrand 
3942d3d692c8SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
3943d3d692c8SDavid Hildenbrand 		vcpu->arch.sie_block->ihcpu = 0xffff;
3944d3d692c8SDavid Hildenbrand 		goto retry;
3945d3d692c8SDavid Hildenbrand 	}
3946d3d692c8SDavid Hildenbrand 
39478ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
39488ad35755SDavid Hildenbrand 		if (!ibs_enabled(vcpu)) {
39498ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
3950ef8f4f49SDavid Hildenbrand 			kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
39518ad35755SDavid Hildenbrand 		}
39528ad35755SDavid Hildenbrand 		goto retry;
39538ad35755SDavid Hildenbrand 	}
39548ad35755SDavid Hildenbrand 
39558ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
39568ad35755SDavid Hildenbrand 		if (ibs_enabled(vcpu)) {
39578ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
39589daecfc6SDavid Hildenbrand 			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
39598ad35755SDavid Hildenbrand 		}
39608ad35755SDavid Hildenbrand 		goto retry;
39618ad35755SDavid Hildenbrand 	}
39628ad35755SDavid Hildenbrand 
39636502a34cSDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
39646502a34cSDavid Hildenbrand 		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
39656502a34cSDavid Hildenbrand 		goto retry;
39666502a34cSDavid Hildenbrand 	}
39676502a34cSDavid Hildenbrand 
3968190df4a2SClaudio Imbrenda 	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
3969190df4a2SClaudio Imbrenda 		/*
3970c9f0a2b8SJanosch Frank 		 * Disable CMM virtualization; we will emulate the ESSA
3971190df4a2SClaudio Imbrenda 		 * instruction manually, in order to provide additional
3972190df4a2SClaudio Imbrenda 		 * functionalities needed for live migration.
3973190df4a2SClaudio Imbrenda 		 */
3974190df4a2SClaudio Imbrenda 		vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
3975190df4a2SClaudio Imbrenda 		goto retry;
3976190df4a2SClaudio Imbrenda 	}
3977190df4a2SClaudio Imbrenda 
3978190df4a2SClaudio Imbrenda 	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
3979190df4a2SClaudio Imbrenda 		/*
3980c9f0a2b8SJanosch Frank 		 * Re-enable CMM virtualization if CMMA is available and
3981c9f0a2b8SJanosch Frank 		 * CMM has been used.
3982190df4a2SClaudio Imbrenda 		 */
3983190df4a2SClaudio Imbrenda 		if ((vcpu->kvm->arch.use_cmma) &&
3984c9f0a2b8SJanosch Frank 		    (vcpu->kvm->mm->context.uses_cmm))
3985190df4a2SClaudio Imbrenda 			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
3986190df4a2SClaudio Imbrenda 		goto retry;
3987190df4a2SClaudio Imbrenda 	}
3988190df4a2SClaudio Imbrenda 
39890759d068SDavid Hildenbrand 	/* nothing to do, just clear the request */
399072875d8aSRadim Krčmář 	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
39913194cdb7SDavid Hildenbrand 	/* we left the vsie handler, nothing to do, just clear the request */
39923194cdb7SDavid Hildenbrand 	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
39930759d068SDavid Hildenbrand 
39942c70fe44SChristian Borntraeger 	return 0;
39952c70fe44SChristian Borntraeger }
39962c70fe44SChristian Borntraeger 
39970e7def5fSDavid Hildenbrand void kvm_s390_set_tod_clock(struct kvm *kvm,
39988fa1696eSCollin L. Walling 			    const struct kvm_s390_vm_tod_clock *gtod)
39998fa1696eSCollin L. Walling {
40008fa1696eSCollin L. Walling 	struct kvm_vcpu *vcpu;
40012cfd7b73SHeiko Carstens 	union tod_clock clk;
40028fa1696eSCollin L. Walling 	int i;
40038fa1696eSCollin L. Walling 
40048fa1696eSCollin L. Walling 	mutex_lock(&kvm->lock);
40058fa1696eSCollin L. Walling 	preempt_disable();
40068fa1696eSCollin L. Walling 
40072cfd7b73SHeiko Carstens 	store_tod_clock_ext(&clk);
40088fa1696eSCollin L. Walling 
40092cfd7b73SHeiko Carstens 	kvm->arch.epoch = gtod->tod - clk.tod;
40100e7def5fSDavid Hildenbrand 	kvm->arch.epdx = 0;
40110e7def5fSDavid Hildenbrand 	if (test_kvm_facility(kvm, 139)) {
40122cfd7b73SHeiko Carstens 		kvm->arch.epdx = gtod->epoch_idx - clk.ei;
40138fa1696eSCollin L. Walling 		if (kvm->arch.epoch > gtod->tod)
40148fa1696eSCollin L. Walling 			kvm->arch.epdx -= 1;
40150e7def5fSDavid Hildenbrand 	}
40168fa1696eSCollin L. Walling 
40178fa1696eSCollin L. Walling 	kvm_s390_vcpu_block_all(kvm);
40188fa1696eSCollin L. Walling 	kvm_for_each_vcpu(i, vcpu, kvm) {
40198fa1696eSCollin L. Walling 		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
40208fa1696eSCollin L. Walling 		vcpu->arch.sie_block->epdx  = kvm->arch.epdx;
40218fa1696eSCollin L. Walling 	}
40228fa1696eSCollin L. Walling 
40238fa1696eSCollin L. Walling 	kvm_s390_vcpu_unblock_all(kvm);
40248fa1696eSCollin L. Walling 	preempt_enable();
40258fa1696eSCollin L. Walling 	mutex_unlock(&kvm->lock);
40268fa1696eSCollin L. Walling }
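/*
 * Worked example of the epoch arithmetic above (numbers are illustrative):
 * if the guest shall see gtod->tod = 0x1000 while the host clock reads
 * clk.tod = 0x3000, then epoch = 0x1000 - 0x3000 = 0xffffffffffffe000.
 * Because that subtraction wrapped (epoch > gtod->tod), the borrow is
 * propagated into the epoch index: epdx is decremented by one when the
 * multiple-epoch facility (139) is available.
 */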
40278fa1696eSCollin L. Walling 
4028fa576c58SThomas Huth /**
4029fa576c58SThomas Huth  * kvm_arch_fault_in_page - fault-in guest page if necessary
4030fa576c58SThomas Huth  * @vcpu: The corresponding virtual cpu
4031fa576c58SThomas Huth  * @gpa: Guest physical address
4032fa576c58SThomas Huth  * @writable: Whether the page should be writable or not
4033fa576c58SThomas Huth  *
4034fa576c58SThomas Huth  * Make sure that a guest page has been faulted-in on the host.
4035fa576c58SThomas Huth  *
4036fa576c58SThomas Huth  * Return: Zero on success, negative error code otherwise.
4037fa576c58SThomas Huth  */
4038fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
403924eb3a82SDominik Dingel {
4040527e30b4SMartin Schwidefsky 	return gmap_fault(vcpu->arch.gmap, gpa,
4041527e30b4SMartin Schwidefsky 			  writable ? FAULT_FLAG_WRITE : 0);
404224eb3a82SDominik Dingel }
404324eb3a82SDominik Dingel 
40443c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
40453c038e6bSDominik Dingel 				      unsigned long token)
40463c038e6bSDominik Dingel {
40473c038e6bSDominik Dingel 	struct kvm_s390_interrupt inti;
4048383d0b05SJens Freimann 	struct kvm_s390_irq irq;
40493c038e6bSDominik Dingel 
40503c038e6bSDominik Dingel 	if (start_token) {
4051383d0b05SJens Freimann 		irq.u.ext.ext_params2 = token;
4052383d0b05SJens Freimann 		irq.type = KVM_S390_INT_PFAULT_INIT;
4053383d0b05SJens Freimann 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
40543c038e6bSDominik Dingel 	} else {
40553c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_DONE;
4056383d0b05SJens Freimann 		inti.parm64 = token;
40573c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
40583c038e6bSDominik Dingel 	}
40593c038e6bSDominik Dingel }
40603c038e6bSDominik Dingel 
40612a18b7e7SVitaly Kuznetsov bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
40623c038e6bSDominik Dingel 				     struct kvm_async_pf *work)
40633c038e6bSDominik Dingel {
40643c038e6bSDominik Dingel 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
40653c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
40662a18b7e7SVitaly Kuznetsov 
40672a18b7e7SVitaly Kuznetsov 	return true;
40683c038e6bSDominik Dingel }
40693c038e6bSDominik Dingel 
40703c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
40713c038e6bSDominik Dingel 				 struct kvm_async_pf *work)
40723c038e6bSDominik Dingel {
40733c038e6bSDominik Dingel 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
40743c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
40753c038e6bSDominik Dingel }
40763c038e6bSDominik Dingel 
40773c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
40783c038e6bSDominik Dingel 			       struct kvm_async_pf *work)
40793c038e6bSDominik Dingel {
40803c038e6bSDominik Dingel 	/* s390 will always inject the page directly */
40813c038e6bSDominik Dingel }
40823c038e6bSDominik Dingel 
40837c0ade6cSVitaly Kuznetsov bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
40843c038e6bSDominik Dingel {
40853c038e6bSDominik Dingel 	/*
40863c038e6bSDominik Dingel 	 * s390 will always inject the page directly,
40873c038e6bSDominik Dingel 	 * but we still want check_async_completion to clean up
40883c038e6bSDominik Dingel 	 */
40893c038e6bSDominik Dingel 	return true;
40903c038e6bSDominik Dingel }
40913c038e6bSDominik Dingel 
4092e8c22266SVitaly Kuznetsov static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
40933c038e6bSDominik Dingel {
40943c038e6bSDominik Dingel 	hva_t hva;
40953c038e6bSDominik Dingel 	struct kvm_arch_async_pf arch;
40963c038e6bSDominik Dingel 
40973c038e6bSDominik Dingel 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4098e8c22266SVitaly Kuznetsov 		return false;
40993c038e6bSDominik Dingel 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
41003c038e6bSDominik Dingel 	    vcpu->arch.pfault_compare)
4101e8c22266SVitaly Kuznetsov 		return false;
41023c038e6bSDominik Dingel 	if (psw_extint_disabled(vcpu))
4103e8c22266SVitaly Kuznetsov 		return false;
41049a022067SDavid Hildenbrand 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
4105e8c22266SVitaly Kuznetsov 		return false;
4106b9224cd7SDavid Hildenbrand 	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
4107e8c22266SVitaly Kuznetsov 		return false;
41083c038e6bSDominik Dingel 	if (!vcpu->arch.gmap->pfault_enabled)
4109e8c22266SVitaly Kuznetsov 		return false;
41103c038e6bSDominik Dingel 
411181480cc1SHeiko Carstens 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
411281480cc1SHeiko Carstens 	hva += current->thread.gmap_addr & ~PAGE_MASK;
411381480cc1SHeiko Carstens 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
4114e8c22266SVitaly Kuznetsov 		return false;
41153c038e6bSDominik Dingel 
4116e8c22266SVitaly Kuznetsov 	return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
41173c038e6bSDominik Dingel }
41183c038e6bSDominik Dingel 
41193fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu)
4120b0c632dbSHeiko Carstens {
41213fb4c40fSThomas Huth 	int rc, cpuflags;
4122e168bf8dSCarsten Otte 
41233c038e6bSDominik Dingel 	/*
41243c038e6bSDominik Dingel 	 * On s390 notifications for arriving pages will be delivered directly
41253c038e6bSDominik Dingel 	 * to the guest, but the housekeeping for completed pfaults is
41263c038e6bSDominik Dingel 	 * handled outside the worker.
41273c038e6bSDominik Dingel 	 */
41283c038e6bSDominik Dingel 	kvm_check_async_pf_completion(vcpu);
41293c038e6bSDominik Dingel 
41307ec7c8c7SChristian Borntraeger 	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
41317ec7c8c7SChristian Borntraeger 	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
4132b0c632dbSHeiko Carstens 
4133b0c632dbSHeiko Carstens 	if (need_resched())
4134b0c632dbSHeiko Carstens 		schedule();
4135b0c632dbSHeiko Carstens 
413679395031SJens Freimann 	if (!kvm_is_ucontrol(vcpu->kvm)) {
413779395031SJens Freimann 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
413879395031SJens Freimann 		if (rc)
413979395031SJens Freimann 			return rc;
414079395031SJens Freimann 	}
41410ff31867SCarsten Otte 
41422c70fe44SChristian Borntraeger 	rc = kvm_s390_handle_requests(vcpu);
41432c70fe44SChristian Borntraeger 	if (rc)
41442c70fe44SChristian Borntraeger 		return rc;
41452c70fe44SChristian Borntraeger 
414627291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu)) {
414727291e21SDavid Hildenbrand 		kvm_s390_backup_guest_per_regs(vcpu);
414827291e21SDavid Hildenbrand 		kvm_s390_patch_guest_per_regs(vcpu);
414927291e21SDavid Hildenbrand 	}
415027291e21SDavid Hildenbrand 
41519f30f621SMichael Mueller 	clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
41529f30f621SMichael Mueller 
4153b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icptcode = 0;
41543fb4c40fSThomas Huth 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
41553fb4c40fSThomas Huth 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
41563fb4c40fSThomas Huth 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
41572b29a9fdSDominik Dingel 
41583fb4c40fSThomas Huth 	return 0;
41593fb4c40fSThomas Huth }
41603fb4c40fSThomas Huth 
4161492d8642SThomas Huth static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
4162492d8642SThomas Huth {
416356317920SDavid Hildenbrand 	struct kvm_s390_pgm_info pgm_info = {
416456317920SDavid Hildenbrand 		.code = PGM_ADDRESSING,
416556317920SDavid Hildenbrand 	};
416656317920SDavid Hildenbrand 	u8 opcode, ilen;
4167492d8642SThomas Huth 	int rc;
4168492d8642SThomas Huth 
4169492d8642SThomas Huth 	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
4170492d8642SThomas Huth 	trace_kvm_s390_sie_fault(vcpu);
4171492d8642SThomas Huth 
4172492d8642SThomas Huth 	/*
4173492d8642SThomas Huth 	 * We want to inject an addressing exception, which is defined as a
4174492d8642SThomas Huth 	 * suppressing or terminating exception. However, since we came here
4175492d8642SThomas Huth 	 * by a DAT access exception, the PSW still points to the faulting
4176492d8642SThomas Huth 	 * instruction since DAT exceptions are nullifying. So we've got
4177492d8642SThomas Huth 	 * to look up the current opcode to get the length of the instruction
4178492d8642SThomas Huth 	 * to be able to forward the PSW.
4179492d8642SThomas Huth 	 */
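	/*
	 * Illustrative note: insn_length() derives the length from the two
	 * leftmost bits of the first opcode byte, e.g. 0x04 (00...) -> 2,
	 * 0xb2 (10...) -> 4 and 0xe3 (11...) -> 6 bytes; that ilen is what
	 * the PSW is forwarded by below.
	 */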
41803fa8cad7SDavid Hildenbrand 	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
418156317920SDavid Hildenbrand 	ilen = insn_length(opcode);
41829b0d721aSDavid Hildenbrand 	if (rc < 0) {
41839b0d721aSDavid Hildenbrand 		return rc;
41849b0d721aSDavid Hildenbrand 	} else if (rc) {
41859b0d721aSDavid Hildenbrand 		/* Instruction-Fetching Exceptions - we can't detect the ilen.
41869b0d721aSDavid Hildenbrand 		 * Forward by arbitrary ilc, injection will take care of
41879b0d721aSDavid Hildenbrand 		 * nullification if necessary.
41889b0d721aSDavid Hildenbrand 		 */
41899b0d721aSDavid Hildenbrand 		pgm_info = vcpu->arch.pgm;
41909b0d721aSDavid Hildenbrand 		ilen = 4;
41919b0d721aSDavid Hildenbrand 	}
419256317920SDavid Hildenbrand 	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
419356317920SDavid Hildenbrand 	kvm_s390_forward_psw(vcpu, ilen);
419456317920SDavid Hildenbrand 	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
4195492d8642SThomas Huth }
4196492d8642SThomas Huth 
41973fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
41983fb4c40fSThomas Huth {
41994d62fcc0SQingFeng Hao 	struct mcck_volatile_info *mcck_info;
42004d62fcc0SQingFeng Hao 	struct sie_page *sie_page;
42014d62fcc0SQingFeng Hao 
42022b29a9fdSDominik Dingel 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
42032b29a9fdSDominik Dingel 		   vcpu->arch.sie_block->icptcode);
42042b29a9fdSDominik Dingel 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
42052b29a9fdSDominik Dingel 
420627291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu))
420727291e21SDavid Hildenbrand 		kvm_s390_restore_guest_per_regs(vcpu);
420827291e21SDavid Hildenbrand 
42097ec7c8c7SChristian Borntraeger 	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
42107ec7c8c7SChristian Borntraeger 	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
421171f116bfSDavid Hildenbrand 
42124d62fcc0SQingFeng Hao 	if (exit_reason == -EINTR) {
42134d62fcc0SQingFeng Hao 		VCPU_EVENT(vcpu, 3, "%s", "machine check");
42144d62fcc0SQingFeng Hao 		sie_page = container_of(vcpu->arch.sie_block,
42154d62fcc0SQingFeng Hao 					struct sie_page, sie_block);
42164d62fcc0SQingFeng Hao 		mcck_info = &sie_page->mcck_info;
42174d62fcc0SQingFeng Hao 		kvm_s390_reinject_machine_check(vcpu, mcck_info);
42184d62fcc0SQingFeng Hao 		return 0;
42194d62fcc0SQingFeng Hao 	}
42204d62fcc0SQingFeng Hao 
422171f116bfSDavid Hildenbrand 	if (vcpu->arch.sie_block->icptcode > 0) {
422271f116bfSDavid Hildenbrand 		int rc = kvm_handle_sie_intercept(vcpu);
422371f116bfSDavid Hildenbrand 
422471f116bfSDavid Hildenbrand 		if (rc != -EOPNOTSUPP)
422571f116bfSDavid Hildenbrand 			return rc;
422671f116bfSDavid Hildenbrand 		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
422771f116bfSDavid Hildenbrand 		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
422871f116bfSDavid Hildenbrand 		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
422971f116bfSDavid Hildenbrand 		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
423071f116bfSDavid Hildenbrand 		return -EREMOTE;
423171f116bfSDavid Hildenbrand 	} else if (exit_reason != -EFAULT) {
423271f116bfSDavid Hildenbrand 		vcpu->stat.exit_null++;
423371f116bfSDavid Hildenbrand 		return 0;
4234210b1607SThomas Huth 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
4235210b1607SThomas Huth 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
4236210b1607SThomas Huth 		vcpu->run->s390_ucontrol.trans_exc_code =
4237210b1607SThomas Huth 						current->thread.gmap_addr;
4238210b1607SThomas Huth 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
423971f116bfSDavid Hildenbrand 		return -EREMOTE;
424024eb3a82SDominik Dingel 	} else if (current->thread.gmap_pfault) {
42413c038e6bSDominik Dingel 		trace_kvm_s390_major_guest_pfault(vcpu);
424224eb3a82SDominik Dingel 		current->thread.gmap_pfault = 0;
424371f116bfSDavid Hildenbrand 		if (kvm_arch_setup_async_pf(vcpu))
424471f116bfSDavid Hildenbrand 			return 0;
424550a05be4SChristian Borntraeger 		vcpu->stat.pfault_sync++;
424671f116bfSDavid Hildenbrand 		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
4247fa576c58SThomas Huth 	}
424871f116bfSDavid Hildenbrand 	return vcpu_post_run_fault_in_sie(vcpu);
42493fb4c40fSThomas Huth }
42503fb4c40fSThomas Huth 
42513adae0b4SJanosch Frank #define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
42523fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu)
42533fb4c40fSThomas Huth {
42543fb4c40fSThomas Huth 	int rc, exit_reason;
4255c8aac234SJanosch Frank 	struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
42563fb4c40fSThomas Huth 
4257800c1065SThomas Huth 	/*
4258800c1065SThomas Huth 	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
4259800c1065SThomas Huth 	 * ning the guest), so that memslots (and other stuff) are protected
4260800c1065SThomas Huth 	 */
4261800c1065SThomas Huth 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4262800c1065SThomas Huth 
4263a76ccff6SThomas Huth 	do {
42643fb4c40fSThomas Huth 		rc = vcpu_pre_run(vcpu);
42653fb4c40fSThomas Huth 		if (rc)
4266a76ccff6SThomas Huth 			break;
42673fb4c40fSThomas Huth 
4268800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
42693fb4c40fSThomas Huth 		/*
4270a76ccff6SThomas Huth 		 * As PF_VCPU will be used in the fault handler, there must be
4271a76ccff6SThomas Huth 		 * no uaccess between guest_enter and guest_exit.
42723fb4c40fSThomas Huth 		 */
42730097d12eSChristian Borntraeger 		local_irq_disable();
42746edaa530SPaolo Bonzini 		guest_enter_irqoff();
4275db0758b2SDavid Hildenbrand 		__disable_cpu_timer_accounting(vcpu);
42760097d12eSChristian Borntraeger 		local_irq_enable();
4277c8aac234SJanosch Frank 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4278c8aac234SJanosch Frank 			memcpy(sie_page->pv_grregs,
4279c8aac234SJanosch Frank 			       vcpu->run->s.regs.gprs,
4280c8aac234SJanosch Frank 			       sizeof(sie_page->pv_grregs));
4281c8aac234SJanosch Frank 		}
428256e62a73SSven Schnelle 		if (test_cpu_flag(CIF_FPU))
428356e62a73SSven Schnelle 			load_fpu_regs();
4284a76ccff6SThomas Huth 		exit_reason = sie64a(vcpu->arch.sie_block,
4285a76ccff6SThomas Huth 				     vcpu->run->s.regs.gprs);
4286c8aac234SJanosch Frank 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4287c8aac234SJanosch Frank 			memcpy(vcpu->run->s.regs.gprs,
4288c8aac234SJanosch Frank 			       sie_page->pv_grregs,
4289c8aac234SJanosch Frank 			       sizeof(sie_page->pv_grregs));
42903adae0b4SJanosch Frank 			/*
42913adae0b4SJanosch Frank 			 * We're not allowed to inject interrupts on intercepts
42923adae0b4SJanosch Frank 			 * that leave the guest state in an "in-between" state
42933adae0b4SJanosch Frank 			 * where the next SIE entry will do a continuation.
42943adae0b4SJanosch Frank 			 * Fence interrupts in our "internal" PSW.
42953adae0b4SJanosch Frank 			 */
42963adae0b4SJanosch Frank 			if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
42973adae0b4SJanosch Frank 			    vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
42983adae0b4SJanosch Frank 				vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
42993adae0b4SJanosch Frank 			}
4300c8aac234SJanosch Frank 		}
43010097d12eSChristian Borntraeger 		local_irq_disable();
4302db0758b2SDavid Hildenbrand 		__enable_cpu_timer_accounting(vcpu);
43036edaa530SPaolo Bonzini 		guest_exit_irqoff();
43040097d12eSChristian Borntraeger 		local_irq_enable();
4305800c1065SThomas Huth 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
43063fb4c40fSThomas Huth 
43073fb4c40fSThomas Huth 		rc = vcpu_post_run(vcpu, exit_reason);
430827291e21SDavid Hildenbrand 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
43093fb4c40fSThomas Huth 
4310800c1065SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
4311e168bf8dSCarsten Otte 	return rc;
4312b0c632dbSHeiko Carstens }
4313b0c632dbSHeiko Carstens 
43142f0a83beSTianjia Zhang static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
4315b028ee3eSDavid Hildenbrand {
43162f0a83beSTianjia Zhang 	struct kvm_run *kvm_run = vcpu->run;
43174d5f2c04SChristian Borntraeger 	struct runtime_instr_cb *riccb;
43184e0b1ab7SFan Zhang 	struct gs_cb *gscb;
43194d5f2c04SChristian Borntraeger 
43204d5f2c04SChristian Borntraeger 	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
43214e0b1ab7SFan Zhang 	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
4322b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
4323b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
4324b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4325b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
4326b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
4327b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
4328b028ee3eSDavid Hildenbrand 	}
4329b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
4330b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
4331b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
4332b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
43339fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
43349fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
4335b028ee3eSDavid Hildenbrand 	}
433623a60f83SCollin Walling 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
433723a60f83SCollin Walling 		vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
433823a60f83SCollin Walling 		vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
433923a60f83SCollin Walling 	}
434080cd8763SFan Zhang 	/*
434180cd8763SFan Zhang 	 * If userspace sets the riccb (e.g. after migration) to a valid state,
434280cd8763SFan Zhang 	 * we should enable RI here instead of doing the lazy enablement.
434380cd8763SFan Zhang 	 */
434480cd8763SFan Zhang 	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
43454d5f2c04SChristian Borntraeger 	    test_kvm_facility(vcpu->kvm, 64) &&
4346bb59c2daSAlice Frosi 	    riccb->v &&
43470c9d8683SDavid Hildenbrand 	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
43484d5f2c04SChristian Borntraeger 		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
43490c9d8683SDavid Hildenbrand 		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
435080cd8763SFan Zhang 	}
43514e0b1ab7SFan Zhang 	/*
43524e0b1ab7SFan Zhang 	 * If userspace sets the gscb (e.g. after migration) to non-zero,
43534e0b1ab7SFan Zhang 	 * we should enable GS here instead of doing the lazy enablement.
43544e0b1ab7SFan Zhang 	 */
43554e0b1ab7SFan Zhang 	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
43564e0b1ab7SFan Zhang 	    test_kvm_facility(vcpu->kvm, 133) &&
43574e0b1ab7SFan Zhang 	    gscb->gssm &&
43584e0b1ab7SFan Zhang 	    !vcpu->arch.gs_enabled) {
43594e0b1ab7SFan Zhang 		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
43604e0b1ab7SFan Zhang 		vcpu->arch.sie_block->ecb |= ECB_GS;
43614e0b1ab7SFan Zhang 		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
43624e0b1ab7SFan Zhang 		vcpu->arch.gs_enabled = 1;
436380cd8763SFan Zhang 	}
436435b3fde6SChristian Borntraeger 	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
436535b3fde6SChristian Borntraeger 	    test_kvm_facility(vcpu->kvm, 82)) {
436635b3fde6SChristian Borntraeger 		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
436735b3fde6SChristian Borntraeger 		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
436835b3fde6SChristian Borntraeger 	}
43694e0b1ab7SFan Zhang 	if (MACHINE_HAS_GS) {
43704e0b1ab7SFan Zhang 		preempt_disable();
43714e0b1ab7SFan Zhang 		__ctl_set_bit(2, 4);
43724e0b1ab7SFan Zhang 		if (current->thread.gs_cb) {
43734e0b1ab7SFan Zhang 			vcpu->arch.host_gscb = current->thread.gs_cb;
43744e0b1ab7SFan Zhang 			save_gs_cb(vcpu->arch.host_gscb);
43754e0b1ab7SFan Zhang 		}
43764e0b1ab7SFan Zhang 		if (vcpu->arch.gs_enabled) {
43774e0b1ab7SFan Zhang 			current->thread.gs_cb = (struct gs_cb *)
43784e0b1ab7SFan Zhang 						&vcpu->run->s.regs.gscb;
43794e0b1ab7SFan Zhang 			restore_gs_cb(current->thread.gs_cb);
43804e0b1ab7SFan Zhang 		}
43814e0b1ab7SFan Zhang 		preempt_enable();
43824e0b1ab7SFan Zhang 	}
4383a3da7b4aSChristian Borntraeger 	/* SIE will load etoken directly from SDNX and therefore kvm_run */
4384811ea797SJanosch Frank }
4385811ea797SJanosch Frank 
43862f0a83beSTianjia Zhang static void sync_regs(struct kvm_vcpu *vcpu)
4387811ea797SJanosch Frank {
43882f0a83beSTianjia Zhang 	struct kvm_run *kvm_run = vcpu->run;
43892f0a83beSTianjia Zhang 
4390811ea797SJanosch Frank 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
4391811ea797SJanosch Frank 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
4392811ea797SJanosch Frank 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
4393811ea797SJanosch Frank 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
4394811ea797SJanosch Frank 		/* some control register changes require a tlb flush */
4395811ea797SJanosch Frank 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4396811ea797SJanosch Frank 	}
4397811ea797SJanosch Frank 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4398811ea797SJanosch Frank 		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
4399811ea797SJanosch Frank 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
4400811ea797SJanosch Frank 	}
4401811ea797SJanosch Frank 	save_access_regs(vcpu->arch.host_acrs);
4402811ea797SJanosch Frank 	restore_access_regs(vcpu->run->s.regs.acrs);
4403811ea797SJanosch Frank 	/* save host (userspace) fprs/vrs */
4404811ea797SJanosch Frank 	save_fpu_regs();
4405811ea797SJanosch Frank 	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
4406811ea797SJanosch Frank 	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
4407811ea797SJanosch Frank 	if (MACHINE_HAS_VX)
4408811ea797SJanosch Frank 		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
4409811ea797SJanosch Frank 	else
4410811ea797SJanosch Frank 		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
4411811ea797SJanosch Frank 	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
4412811ea797SJanosch Frank 	if (test_fp_ctl(current->thread.fpu.fpc))
4413811ea797SJanosch Frank 		/* User space provided an invalid FPC, let's clear it */
4414811ea797SJanosch Frank 		current->thread.fpu.fpc = 0;
4415811ea797SJanosch Frank 
4416811ea797SJanosch Frank 	/* Sync fmt2 only data */
4417811ea797SJanosch Frank 	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
44182f0a83beSTianjia Zhang 		sync_regs_fmt2(vcpu);
4419811ea797SJanosch Frank 	} else {
4420811ea797SJanosch Frank 		/*
4421811ea797SJanosch Frank 		 * In several places we have to modify our internal view to
4422811ea797SJanosch Frank 		 * not do things that are disallowed by the ultravisor. For
4423811ea797SJanosch Frank 		 * example we must not inject interrupts after specific exits
4424811ea797SJanosch Frank 		 * (e.g. 112 prefix page not secure). We do this by turning
4425811ea797SJanosch Frank 		 * off the machine check, external and I/O interrupt bits
4426811ea797SJanosch Frank 		 * of our PSW copy. To avoid getting validity intercepts, we
4427811ea797SJanosch Frank 		 * do only accept the condition code from userspace.
4428811ea797SJanosch Frank 		 */
4429811ea797SJanosch Frank 		vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
4430811ea797SJanosch Frank 		vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
4431811ea797SJanosch Frank 						   PSW_MASK_CC;
4432811ea797SJanosch Frank 	}
443380cd8763SFan Zhang 
4434b028ee3eSDavid Hildenbrand 	kvm_run->kvm_dirty_regs = 0;
4435b028ee3eSDavid Hildenbrand }
4436b028ee3eSDavid Hildenbrand 
44372f0a83beSTianjia Zhang static void store_regs_fmt2(struct kvm_vcpu *vcpu)
4438b028ee3eSDavid Hildenbrand {
44392f0a83beSTianjia Zhang 	struct kvm_run *kvm_run = vcpu->run;
44402f0a83beSTianjia Zhang 
4441b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
4442b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
4443b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
444435b3fde6SChristian Borntraeger 	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
444523a60f83SCollin Walling 	kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
44464e0b1ab7SFan Zhang 	if (MACHINE_HAS_GS) {
444744bada28SHeiko Carstens 		preempt_disable();
44484e0b1ab7SFan Zhang 		__ctl_set_bit(2, 4);
44494e0b1ab7SFan Zhang 		if (vcpu->arch.gs_enabled)
44504e0b1ab7SFan Zhang 			save_gs_cb(current->thread.gs_cb);
44514e0b1ab7SFan Zhang 		current->thread.gs_cb = vcpu->arch.host_gscb;
44524e0b1ab7SFan Zhang 		restore_gs_cb(vcpu->arch.host_gscb);
44534e0b1ab7SFan Zhang 		if (!vcpu->arch.host_gscb)
44544e0b1ab7SFan Zhang 			__ctl_clear_bit(2, 4);
44554e0b1ab7SFan Zhang 		vcpu->arch.host_gscb = NULL;
445644bada28SHeiko Carstens 		preempt_enable();
44574e0b1ab7SFan Zhang 	}
4458a3da7b4aSChristian Borntraeger 	/* SIE will save etoken directly into SDNX and therefore kvm_run */
4459b028ee3eSDavid Hildenbrand }
4460b028ee3eSDavid Hildenbrand 
44612f0a83beSTianjia Zhang static void store_regs(struct kvm_vcpu *vcpu)
4462811ea797SJanosch Frank {
44632f0a83beSTianjia Zhang 	struct kvm_run *kvm_run = vcpu->run;
44642f0a83beSTianjia Zhang 
4465811ea797SJanosch Frank 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
4466811ea797SJanosch Frank 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
4467811ea797SJanosch Frank 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
4468811ea797SJanosch Frank 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
4469811ea797SJanosch Frank 	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
4470811ea797SJanosch Frank 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
4471811ea797SJanosch Frank 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
4472811ea797SJanosch Frank 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
4473811ea797SJanosch Frank 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
4474811ea797SJanosch Frank 	save_access_regs(vcpu->run->s.regs.acrs);
4475811ea797SJanosch Frank 	restore_access_regs(vcpu->arch.host_acrs);
4476811ea797SJanosch Frank 	/* Save guest register state */
4477811ea797SJanosch Frank 	save_fpu_regs();
4478811ea797SJanosch Frank 	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
4479811ea797SJanosch Frank 	/* Restore will be done lazily at return */
4480811ea797SJanosch Frank 	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
4481811ea797SJanosch Frank 	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
4482811ea797SJanosch Frank 	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
44832f0a83beSTianjia Zhang 		store_regs_fmt2(vcpu);
4484811ea797SJanosch Frank }
4485811ea797SJanosch Frank 
44861b94f6f8STianjia Zhang int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
4487b0c632dbSHeiko Carstens {
44881b94f6f8STianjia Zhang 	struct kvm_run *kvm_run = vcpu->run;
44898f2abe6aSChristian Borntraeger 	int rc;
4490b0c632dbSHeiko Carstens 
4491460df4c1SPaolo Bonzini 	if (kvm_run->immediate_exit)
4492460df4c1SPaolo Bonzini 		return -EINTR;
4493460df4c1SPaolo Bonzini 
4494200824f5SThomas Huth 	if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
4495200824f5SThomas Huth 	    kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
4496200824f5SThomas Huth 		return -EINVAL;
4497200824f5SThomas Huth 
4498accb757dSChristoffer Dall 	vcpu_load(vcpu);
4499accb757dSChristoffer Dall 
450027291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu)) {
450127291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
4502accb757dSChristoffer Dall 		rc = 0;
4503accb757dSChristoffer Dall 		goto out;
450427291e21SDavid Hildenbrand 	}
450527291e21SDavid Hildenbrand 
450620b7035cSJan H. Schönherr 	kvm_sigset_activate(vcpu);
4507b0c632dbSHeiko Carstens 
4508fe28c786SJanosch Frank 	/*
4509fe28c786SJanosch Frank 	 * no need to check the return value of vcpu_start as it can only have
4510fe28c786SJanosch Frank 	 * an error for protvirt, but protvirt means user cpu state
4511fe28c786SJanosch Frank 	 */
45126352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
45136852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
45146352e4d2SDavid Hildenbrand 	} else if (is_vcpu_stopped(vcpu)) {
4515ea2cdd27SDavid Hildenbrand 		pr_err_ratelimited("can't run stopped vcpu %d\n",
45166352e4d2SDavid Hildenbrand 				   vcpu->vcpu_id);
4517accb757dSChristoffer Dall 		rc = -EINVAL;
4518accb757dSChristoffer Dall 		goto out;
45196352e4d2SDavid Hildenbrand 	}
4520b0c632dbSHeiko Carstens 
45212f0a83beSTianjia Zhang 	sync_regs(vcpu);
4522db0758b2SDavid Hildenbrand 	enable_cpu_timer_accounting(vcpu);
4523d7b0b5ebSCarsten Otte 
4524dab4079dSHeiko Carstens 	might_fault();
4525e168bf8dSCarsten Otte 	rc = __vcpu_run(vcpu);
45269ace903dSChristian Ehrhardt 
4527b1d16c49SChristian Ehrhardt 	if (signal_pending(current) && !rc) {
4528b1d16c49SChristian Ehrhardt 		kvm_run->exit_reason = KVM_EXIT_INTR;
45298f2abe6aSChristian Borntraeger 		rc = -EINTR;
4530b1d16c49SChristian Ehrhardt 	}
45318f2abe6aSChristian Borntraeger 
453227291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu) && !rc)  {
453327291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
453427291e21SDavid Hildenbrand 		rc = 0;
453527291e21SDavid Hildenbrand 	}
453627291e21SDavid Hildenbrand 
45378f2abe6aSChristian Borntraeger 	if (rc == -EREMOTE) {
453871f116bfSDavid Hildenbrand 		/* userspace support is needed, kvm_run has been prepared */
45398f2abe6aSChristian Borntraeger 		rc = 0;
45408f2abe6aSChristian Borntraeger 	}
45418f2abe6aSChristian Borntraeger 
4542db0758b2SDavid Hildenbrand 	disable_cpu_timer_accounting(vcpu);
45432f0a83beSTianjia Zhang 	store_regs(vcpu);
4544d7b0b5ebSCarsten Otte 
454520b7035cSJan H. Schönherr 	kvm_sigset_deactivate(vcpu);
4546b0c632dbSHeiko Carstens 
4547b0c632dbSHeiko Carstens 	vcpu->stat.exit_userspace++;
4548accb757dSChristoffer Dall out:
4549accb757dSChristoffer Dall 	vcpu_put(vcpu);
45507e8e6ab4SHeiko Carstens 	return rc;
4551b0c632dbSHeiko Carstens }
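/*
 * A minimal sketch of the userspace run loop served by this ioctl; vcpu_fd
 * and mmap_size (from KVM_GET_VCPU_MMAP_SIZE) are assumptions of the
 * example:
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	while (ioctl(vcpu_fd, KVM_RUN, 0) == 0) {
 *		if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *			break;	// intercept to be handled by the VMM
 *	}
 */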
4552b0c632dbSHeiko Carstens 
4553b0c632dbSHeiko Carstens /*
4554b0c632dbSHeiko Carstens  * store status at address
4555b0c632dbSHeiko Carstens  * we have two special cases:
4556b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
4557b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
4558b0c632dbSHeiko Carstens  */
4559d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
4560b0c632dbSHeiko Carstens {
4561092670cdSCarsten Otte 	unsigned char archmode = 1;
45629abc2a08SDavid Hildenbrand 	freg_t fprs[NUM_FPRS];
4563fda902cbSMichael Mueller 	unsigned int px;
45644287f247SDavid Hildenbrand 	u64 clkcomp, cputm;
4565d0bce605SHeiko Carstens 	int rc;
4566b0c632dbSHeiko Carstens 
4567d9a3a09aSMartin Schwidefsky 	px = kvm_s390_get_prefix(vcpu);
4568d0bce605SHeiko Carstens 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
4569d0bce605SHeiko Carstens 		if (write_guest_abs(vcpu, 163, &archmode, 1))
4570b0c632dbSHeiko Carstens 			return -EFAULT;
4571d9a3a09aSMartin Schwidefsky 		gpa = 0;
4572d0bce605SHeiko Carstens 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
4573d0bce605SHeiko Carstens 		if (write_guest_real(vcpu, 163, &archmode, 1))
4574b0c632dbSHeiko Carstens 			return -EFAULT;
4575d9a3a09aSMartin Schwidefsky 		gpa = px;
4576d9a3a09aSMartin Schwidefsky 	} else
4577d9a3a09aSMartin Schwidefsky 		gpa -= __LC_FPREGS_SAVE_AREA;
45789abc2a08SDavid Hildenbrand 
45799abc2a08SDavid Hildenbrand 	/* manually convert vector registers if necessary */
45809abc2a08SDavid Hildenbrand 	if (MACHINE_HAS_VX) {
45819522b37fSDavid Hildenbrand 		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
4582d9a3a09aSMartin Schwidefsky 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
45839abc2a08SDavid Hildenbrand 				     fprs, 128);
45849abc2a08SDavid Hildenbrand 	} else {
45859abc2a08SDavid Hildenbrand 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
45866fd8e67dSDavid Hildenbrand 				     vcpu->run->s.regs.fprs, 128);
45879abc2a08SDavid Hildenbrand 	}
4588d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
4589d0bce605SHeiko Carstens 			      vcpu->run->s.regs.gprs, 128);
4590d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
4591d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gpsw, 16);
4592d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
4593fda902cbSMichael Mueller 			      &px, 4);
4594d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
45959abc2a08SDavid Hildenbrand 			      &vcpu->run->s.regs.fpc, 4);
4596d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
4597d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->todpr, 4);
45984287f247SDavid Hildenbrand 	cputm = kvm_s390_get_cpu_timer(vcpu);
4599d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
46004287f247SDavid Hildenbrand 			      &cputm, 8);
4601178bd789SThomas Huth 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
4602d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
4603d0bce605SHeiko Carstens 			      &clkcomp, 8);
4604d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
4605d0bce605SHeiko Carstens 			      &vcpu->run->s.regs.acrs, 64);
4606d9a3a09aSMartin Schwidefsky 	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
4607d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gcr, 128);
4608d0bce605SHeiko Carstens 	return rc ? -EFAULT : 0;
4609b0c632dbSHeiko Carstens }
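/*
 * Sketch of the corresponding userspace request (vcpu_fd assumed); the
 * special constants select the absolute save area at 0x1200 or the prefix
 * area instead of an explicit address:
 *
 *	if (ioctl(vcpu_fd, KVM_S390_STORE_STATUS,
 *		  KVM_S390_STORE_STATUS_NOADDR) < 0)
 *		perror("KVM_S390_STORE_STATUS");
 */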
4610b0c632dbSHeiko Carstens 
4611e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
4612e879892cSThomas Huth {
4613e879892cSThomas Huth 	/*
4614e879892cSThomas Huth 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
461531d8b8d4SChristian Borntraeger 	 * switch in the run ioctl. Let's update our copies before we save
4616e879892cSThomas Huth 	 * them into the save area
4617e879892cSThomas Huth 	 */
4618d0164ee2SHendrik Brueckner 	save_fpu_regs();
46199abc2a08SDavid Hildenbrand 	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
4620e879892cSThomas Huth 	save_access_regs(vcpu->run->s.regs.acrs);
4621e879892cSThomas Huth 
4622e879892cSThomas Huth 	return kvm_s390_store_status_unloaded(vcpu, addr);
4623e879892cSThomas Huth }
4624e879892cSThomas Huth 
46258ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
46268ad35755SDavid Hildenbrand {
46278ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
46288e236546SChristian Borntraeger 	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
46298ad35755SDavid Hildenbrand }
46308ad35755SDavid Hildenbrand 
46318ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
46328ad35755SDavid Hildenbrand {
46338ad35755SDavid Hildenbrand 	unsigned int i;
46348ad35755SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
46358ad35755SDavid Hildenbrand 
46368ad35755SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
46378ad35755SDavid Hildenbrand 		__disable_ibs_on_vcpu(vcpu);
46388ad35755SDavid Hildenbrand 	}
46398ad35755SDavid Hildenbrand }
46408ad35755SDavid Hildenbrand 
46418ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
46428ad35755SDavid Hildenbrand {
464309a400e7SDavid Hildenbrand 	if (!sclp.has_ibs)
464409a400e7SDavid Hildenbrand 		return;
46458ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
46468e236546SChristian Borntraeger 	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
46478ad35755SDavid Hildenbrand }
46488ad35755SDavid Hildenbrand 
4649fe28c786SJanosch Frank int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
46506852d7b6SDavid Hildenbrand {
4651fe28c786SJanosch Frank 	int i, online_vcpus, r = 0, started_vcpus = 0;
46528ad35755SDavid Hildenbrand 
46538ad35755SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
4654fe28c786SJanosch Frank 		return 0;
46558ad35755SDavid Hildenbrand 
46566852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
46578ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
4658433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
46598ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
46608ad35755SDavid Hildenbrand 
4661fe28c786SJanosch Frank 	/* Let's tell the UV that we want to change into the operating state */
4662fe28c786SJanosch Frank 	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4663fe28c786SJanosch Frank 		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
4664fe28c786SJanosch Frank 		if (r) {
4665fe28c786SJanosch Frank 			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
4666fe28c786SJanosch Frank 			return r;
4667fe28c786SJanosch Frank 		}
4668fe28c786SJanosch Frank 	}
4669fe28c786SJanosch Frank 
46708ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
46718ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
46728ad35755SDavid Hildenbrand 			started_vcpus++;
46738ad35755SDavid Hildenbrand 	}
46748ad35755SDavid Hildenbrand 
46758ad35755SDavid Hildenbrand 	if (started_vcpus == 0) {
46768ad35755SDavid Hildenbrand 		/* we're the only active VCPU -> speed it up */
46778ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(vcpu);
46788ad35755SDavid Hildenbrand 	} else if (started_vcpus == 1) {
46798ad35755SDavid Hildenbrand 		/*
46808ad35755SDavid Hildenbrand 		 * As we are starting a second VCPU, we have to disable
46818ad35755SDavid Hildenbrand 		 * the IBS facility on all VCPUs to remove potentially
468238860756SBhaskar Chowdhury 		 * outstanding ENABLE requests.
46838ad35755SDavid Hildenbrand 		 */
46848ad35755SDavid Hildenbrand 		__disable_ibs_on_all_vcpus(vcpu->kvm);
46858ad35755SDavid Hildenbrand 	}
46868ad35755SDavid Hildenbrand 
46879daecfc6SDavid Hildenbrand 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
46888ad35755SDavid Hildenbrand 	/*
468972f21820SChristian Borntraeger 	 * The real PSW might have changed due to a RESTART interpreted by the
469072f21820SChristian Borntraeger 	 * ultravisor. We block all interrupts and let the next sie exit
469172f21820SChristian Borntraeger 	 * refresh our view.
469272f21820SChristian Borntraeger 	 */
469372f21820SChristian Borntraeger 	if (kvm_s390_pv_cpu_is_protected(vcpu))
469472f21820SChristian Borntraeger 		vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
469572f21820SChristian Borntraeger 	/*
46968ad35755SDavid Hildenbrand 	 * Another VCPU might have used IBS while we were offline.
46978ad35755SDavid Hildenbrand 	 * Let's play safe and flush the VCPU at startup.
46988ad35755SDavid Hildenbrand 	 */
4699d3d692c8SDavid Hildenbrand 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4700433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
4701fe28c786SJanosch Frank 	return 0;
47026852d7b6SDavid Hildenbrand }
47036852d7b6SDavid Hildenbrand 
4704fe28c786SJanosch Frank int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
47056852d7b6SDavid Hildenbrand {
4706fe28c786SJanosch Frank 	int i, online_vcpus, r = 0, started_vcpus = 0;
47078ad35755SDavid Hildenbrand 	struct kvm_vcpu *started_vcpu = NULL;
47088ad35755SDavid Hildenbrand 
47098ad35755SDavid Hildenbrand 	if (is_vcpu_stopped(vcpu))
4710fe28c786SJanosch Frank 		return 0;
47118ad35755SDavid Hildenbrand 
47126852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
47138ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
4714433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
47158ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
47168ad35755SDavid Hildenbrand 
4717fe28c786SJanosch Frank 	/* Let's tell the UV that we want to change into the stopped state */
4718fe28c786SJanosch Frank 	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4719fe28c786SJanosch Frank 		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
4720fe28c786SJanosch Frank 		if (r) {
4721fe28c786SJanosch Frank 			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
4722fe28c786SJanosch Frank 			return r;
4723fe28c786SJanosch Frank 		}
4724fe28c786SJanosch Frank 	}
4725fe28c786SJanosch Frank 
472632f5ff63SDavid Hildenbrand 	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
47276cddd432SDavid Hildenbrand 	kvm_s390_clear_stop_irq(vcpu);
472832f5ff63SDavid Hildenbrand 
4729ef8f4f49SDavid Hildenbrand 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
47308ad35755SDavid Hildenbrand 	__disable_ibs_on_vcpu(vcpu);
47318ad35755SDavid Hildenbrand 
47328ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
47338ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
47348ad35755SDavid Hildenbrand 			started_vcpus++;
47358ad35755SDavid Hildenbrand 			started_vcpu = vcpu->kvm->vcpus[i];
47368ad35755SDavid Hildenbrand 		}
47378ad35755SDavid Hildenbrand 	}
47388ad35755SDavid Hildenbrand 
47398ad35755SDavid Hildenbrand 	if (started_vcpus == 1) {
47408ad35755SDavid Hildenbrand 		/*
47418ad35755SDavid Hildenbrand 		 * As we only have one VCPU left, we want to enable the
47428ad35755SDavid Hildenbrand 		 * IBS facility for that VCPU to speed it up.
47438ad35755SDavid Hildenbrand 		 */
47448ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(started_vcpu);
47458ad35755SDavid Hildenbrand 	}
47468ad35755SDavid Hildenbrand 
4747433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
4748fe28c786SJanosch Frank 	return 0;
47496852d7b6SDavid Hildenbrand }
47506852d7b6SDavid Hildenbrand 
4751d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
4752d6712df9SCornelia Huck 				     struct kvm_enable_cap *cap)
4753d6712df9SCornelia Huck {
4754d6712df9SCornelia Huck 	int r;
4755d6712df9SCornelia Huck 
4756d6712df9SCornelia Huck 	if (cap->flags)
4757d6712df9SCornelia Huck 		return -EINVAL;
4758d6712df9SCornelia Huck 
4759d6712df9SCornelia Huck 	switch (cap->cap) {
4760fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
4761fa6b7fe9SCornelia Huck 		if (!vcpu->kvm->arch.css_support) {
4762fa6b7fe9SCornelia Huck 			vcpu->kvm->arch.css_support = 1;
4763c92ea7b9SChristian Borntraeger 			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
4764fa6b7fe9SCornelia Huck 			trace_kvm_s390_enable_css(vcpu->kvm);
4765fa6b7fe9SCornelia Huck 		}
4766fa6b7fe9SCornelia Huck 		r = 0;
4767fa6b7fe9SCornelia Huck 		break;
4768d6712df9SCornelia Huck 	default:
4769d6712df9SCornelia Huck 		r = -EINVAL;
4770d6712df9SCornelia Huck 		break;
4771d6712df9SCornelia Huck 	}
4772d6712df9SCornelia Huck 	return r;
4773d6712df9SCornelia Huck }
4774d6712df9SCornelia Huck 
477519e12277SJanosch Frank static long kvm_s390_guest_sida_op(struct kvm_vcpu *vcpu,
477619e12277SJanosch Frank 				   struct kvm_s390_mem_op *mop)
477719e12277SJanosch Frank {
477819e12277SJanosch Frank 	void __user *uaddr = (void __user *)mop->buf;
477919e12277SJanosch Frank 	int r = 0;
478019e12277SJanosch Frank 
478119e12277SJanosch Frank 	if (mop->flags || !mop->size)
478219e12277SJanosch Frank 		return -EINVAL;
478319e12277SJanosch Frank 	if (mop->size + mop->sida_offset < mop->size)
478419e12277SJanosch Frank 		return -EINVAL;
478519e12277SJanosch Frank 	if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
478619e12277SJanosch Frank 		return -E2BIG;
478719e12277SJanosch Frank 
478819e12277SJanosch Frank 	switch (mop->op) {
478919e12277SJanosch Frank 	case KVM_S390_MEMOP_SIDA_READ:
479019e12277SJanosch Frank 		if (copy_to_user(uaddr, (void *)(sida_origin(vcpu->arch.sie_block) +
479119e12277SJanosch Frank 				 mop->sida_offset), mop->size))
479219e12277SJanosch Frank 			r = -EFAULT;
479319e12277SJanosch Frank 
479419e12277SJanosch Frank 		break;
479519e12277SJanosch Frank 	case KVM_S390_MEMOP_SIDA_WRITE:
479619e12277SJanosch Frank 		if (copy_from_user((void *)(sida_origin(vcpu->arch.sie_block) +
479719e12277SJanosch Frank 				   mop->sida_offset), uaddr, mop->size))
479819e12277SJanosch Frank 			r = -EFAULT;
479919e12277SJanosch Frank 		break;
480019e12277SJanosch Frank 	}
480119e12277SJanosch Frank 	return r;
480219e12277SJanosch Frank }
480341408c28SThomas Huth static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
480441408c28SThomas Huth 				  struct kvm_s390_mem_op *mop)
480541408c28SThomas Huth {
480641408c28SThomas Huth 	void __user *uaddr = (void __user *)mop->buf;
480741408c28SThomas Huth 	void *tmpbuf = NULL;
480819e12277SJanosch Frank 	int r = 0;
480941408c28SThomas Huth 	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
481041408c28SThomas Huth 				    | KVM_S390_MEMOP_F_CHECK_ONLY;
481141408c28SThomas Huth 
4812a13b03bbSThomas Huth 	if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
481341408c28SThomas Huth 		return -EINVAL;
481441408c28SThomas Huth 
481541408c28SThomas Huth 	if (mop->size > MEM_OP_MAX_SIZE)
481641408c28SThomas Huth 		return -E2BIG;
481741408c28SThomas Huth 
481819e12277SJanosch Frank 	if (kvm_s390_pv_cpu_is_protected(vcpu))
481919e12277SJanosch Frank 		return -EINVAL;
482019e12277SJanosch Frank 
482141408c28SThomas Huth 	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
482241408c28SThomas Huth 		tmpbuf = vmalloc(mop->size);
482341408c28SThomas Huth 		if (!tmpbuf)
482441408c28SThomas Huth 			return -ENOMEM;
482541408c28SThomas Huth 	}
482641408c28SThomas Huth 
482741408c28SThomas Huth 	switch (mop->op) {
482841408c28SThomas Huth 	case KVM_S390_MEMOP_LOGICAL_READ:
482941408c28SThomas Huth 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
483092c96321SDavid Hildenbrand 			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
483192c96321SDavid Hildenbrand 					    mop->size, GACC_FETCH);
483241408c28SThomas Huth 			break;
483341408c28SThomas Huth 		}
483441408c28SThomas Huth 		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
483541408c28SThomas Huth 		if (r == 0) {
483641408c28SThomas Huth 			if (copy_to_user(uaddr, tmpbuf, mop->size))
483741408c28SThomas Huth 				r = -EFAULT;
483841408c28SThomas Huth 		}
483941408c28SThomas Huth 		break;
484041408c28SThomas Huth 	case KVM_S390_MEMOP_LOGICAL_WRITE:
484141408c28SThomas Huth 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
484292c96321SDavid Hildenbrand 			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
484392c96321SDavid Hildenbrand 					    mop->size, GACC_STORE);
484441408c28SThomas Huth 			break;
484541408c28SThomas Huth 		}
484641408c28SThomas Huth 		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
484741408c28SThomas Huth 			r = -EFAULT;
484841408c28SThomas Huth 			break;
484941408c28SThomas Huth 		}
485041408c28SThomas Huth 		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
485141408c28SThomas Huth 		break;
485241408c28SThomas Huth 	}
485341408c28SThomas Huth 
485441408c28SThomas Huth 	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
485541408c28SThomas Huth 		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
485641408c28SThomas Huth 
485741408c28SThomas Huth 	vfree(tmpbuf);
485841408c28SThomas Huth 	return r;
485941408c28SThomas Huth }
486041408c28SThomas Huth 
486119e12277SJanosch Frank static long kvm_s390_guest_memsida_op(struct kvm_vcpu *vcpu,
486219e12277SJanosch Frank 				      struct kvm_s390_mem_op *mop)
486319e12277SJanosch Frank {
486419e12277SJanosch Frank 	int r, srcu_idx;
486519e12277SJanosch Frank 
486619e12277SJanosch Frank 	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
486719e12277SJanosch Frank 
486819e12277SJanosch Frank 	switch (mop->op) {
486919e12277SJanosch Frank 	case KVM_S390_MEMOP_LOGICAL_READ:
487019e12277SJanosch Frank 	case KVM_S390_MEMOP_LOGICAL_WRITE:
487119e12277SJanosch Frank 		r = kvm_s390_guest_mem_op(vcpu, mop);
487219e12277SJanosch Frank 		break;
487319e12277SJanosch Frank 	case KVM_S390_MEMOP_SIDA_READ:
487419e12277SJanosch Frank 	case KVM_S390_MEMOP_SIDA_WRITE:
487519e12277SJanosch Frank 		/* we are locked against sida going away by the vcpu->mutex */
487619e12277SJanosch Frank 		r = kvm_s390_guest_sida_op(vcpu, mop);
487719e12277SJanosch Frank 		break;
487819e12277SJanosch Frank 	default:
487919e12277SJanosch Frank 		r = -EINVAL;
488019e12277SJanosch Frank 	}
488119e12277SJanosch Frank 
488219e12277SJanosch Frank 	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
488319e12277SJanosch Frank 	return r;
488419e12277SJanosch Frank }
488519e12277SJanosch Frank 
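/*
 * Asynchronous vcpu ioctls, processed without vcpu_load(): only the
 * interrupt injection ioctls (KVM_S390_IRQ and KVM_S390_INTERRUPT) are
 * handled here; everything else is passed on to kvm_arch_vcpu_ioctl()
 * via -ENOIOCTLCMD.
 */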
48865cb0944cSPaolo Bonzini long kvm_arch_vcpu_async_ioctl(struct file *filp,
4887b0c632dbSHeiko Carstens 			       unsigned int ioctl, unsigned long arg)
4888b0c632dbSHeiko Carstens {
4889b0c632dbSHeiko Carstens 	struct kvm_vcpu *vcpu = filp->private_data;
4890b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
4891b0c632dbSHeiko Carstens 
489293736624SAvi Kivity 	switch (ioctl) {
489347b43c52SJens Freimann 	case KVM_S390_IRQ: {
489447b43c52SJens Freimann 		struct kvm_s390_irq s390irq;
489547b43c52SJens Freimann 
489647b43c52SJens Freimann 		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
48979b062471SChristoffer Dall 			return -EFAULT;
48989b062471SChristoffer Dall 		return kvm_s390_inject_vcpu(vcpu, &s390irq);
489947b43c52SJens Freimann 	}
490093736624SAvi Kivity 	case KVM_S390_INTERRUPT: {
4901ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
490253936b5bSThomas Huth 		struct kvm_s390_irq s390irq = {};
4903ba5c1e9bSCarsten Otte 
4904ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
49059b062471SChristoffer Dall 			return -EFAULT;
4906383d0b05SJens Freimann 		if (s390int_to_s390irq(&s390int, &s390irq))
4907383d0b05SJens Freimann 			return -EINVAL;
49089b062471SChristoffer Dall 		return kvm_s390_inject_vcpu(vcpu, &s390irq);
4909ba5c1e9bSCarsten Otte 	}
49109b062471SChristoffer Dall 	}
49115cb0944cSPaolo Bonzini 	return -ENOIOCTLCMD;
49125cb0944cSPaolo Bonzini }
49135cb0944cSPaolo Bonzini 
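/*
 * All remaining vcpu ioctls are handled here, with the vcpu loaded
 * between vcpu_load() and vcpu_put().
 */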
49145cb0944cSPaolo Bonzini long kvm_arch_vcpu_ioctl(struct file *filp,
49155cb0944cSPaolo Bonzini 			 unsigned int ioctl, unsigned long arg)
49165cb0944cSPaolo Bonzini {
49175cb0944cSPaolo Bonzini 	struct kvm_vcpu *vcpu = filp->private_data;
49185cb0944cSPaolo Bonzini 	void __user *argp = (void __user *)arg;
49195cb0944cSPaolo Bonzini 	int idx;
49205cb0944cSPaolo Bonzini 	long r;
49218a8378faSJanosch Frank 	u16 rc, rrc;
49229b062471SChristoffer Dall 
49239b062471SChristoffer Dall 	vcpu_load(vcpu);
49249b062471SChristoffer Dall 
49259b062471SChristoffer Dall 	switch (ioctl) {
4926b0c632dbSHeiko Carstens 	case KVM_S390_STORE_STATUS:
4927800c1065SThomas Huth 		idx = srcu_read_lock(&vcpu->kvm->srcu);
492855680890SChristian Borntraeger 		r = kvm_s390_store_status_unloaded(vcpu, arg);
4929800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
4930bc923cc9SAvi Kivity 		break;
4931b0c632dbSHeiko Carstens 	case KVM_S390_SET_INITIAL_PSW: {
4932b0c632dbSHeiko Carstens 		psw_t psw;
4933b0c632dbSHeiko Carstens 
4934bc923cc9SAvi Kivity 		r = -EFAULT;
4935b0c632dbSHeiko Carstens 		if (copy_from_user(&psw, argp, sizeof(psw)))
4936bc923cc9SAvi Kivity 			break;
4937bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
4938bc923cc9SAvi Kivity 		break;
4939b0c632dbSHeiko Carstens 	}
49407de3f142SJanosch Frank 	case KVM_S390_CLEAR_RESET:
49417de3f142SJanosch Frank 		r = 0;
49427de3f142SJanosch Frank 		kvm_arch_vcpu_ioctl_clear_reset(vcpu);
49438a8378faSJanosch Frank 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
49448a8378faSJanosch Frank 			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
49458a8378faSJanosch Frank 					  UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
49468a8378faSJanosch Frank 			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
49478a8378faSJanosch Frank 				   rc, rrc);
49488a8378faSJanosch Frank 		}
49497de3f142SJanosch Frank 		break;
4950b0c632dbSHeiko Carstens 	case KVM_S390_INITIAL_RESET:
49517de3f142SJanosch Frank 		r = 0;
49527de3f142SJanosch Frank 		kvm_arch_vcpu_ioctl_initial_reset(vcpu);
49538a8378faSJanosch Frank 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
49548a8378faSJanosch Frank 			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
49558a8378faSJanosch Frank 					  UVC_CMD_CPU_RESET_INITIAL,
49568a8378faSJanosch Frank 					  &rc, &rrc);
49578a8378faSJanosch Frank 			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
49588a8378faSJanosch Frank 				   rc, rrc);
49598a8378faSJanosch Frank 		}
49607de3f142SJanosch Frank 		break;
49617de3f142SJanosch Frank 	case KVM_S390_NORMAL_RESET:
49627de3f142SJanosch Frank 		r = 0;
49637de3f142SJanosch Frank 		kvm_arch_vcpu_ioctl_normal_reset(vcpu);
49648a8378faSJanosch Frank 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
49658a8378faSJanosch Frank 			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
49668a8378faSJanosch Frank 					  UVC_CMD_CPU_RESET, &rc, &rrc);
49678a8378faSJanosch Frank 			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
49688a8378faSJanosch Frank 				   rc, rrc);
49698a8378faSJanosch Frank 		}
4970bc923cc9SAvi Kivity 		break;
497114eebd91SCarsten Otte 	case KVM_SET_ONE_REG:
497214eebd91SCarsten Otte 	case KVM_GET_ONE_REG: {
497314eebd91SCarsten Otte 		struct kvm_one_reg reg;
497468cf7b1fSJanosch Frank 		r = -EINVAL;
497568cf7b1fSJanosch Frank 		if (kvm_s390_pv_cpu_is_protected(vcpu))
497668cf7b1fSJanosch Frank 			break;
497714eebd91SCarsten Otte 		r = -EFAULT;
497814eebd91SCarsten Otte 		if (copy_from_user(&reg, argp, sizeof(reg)))
497914eebd91SCarsten Otte 			break;
498014eebd91SCarsten Otte 		if (ioctl == KVM_SET_ONE_REG)
498114eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
498214eebd91SCarsten Otte 		else
498314eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
498414eebd91SCarsten Otte 		break;
498514eebd91SCarsten Otte 	}
498627e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
498727e0393fSCarsten Otte 	case KVM_S390_UCAS_MAP: {
498827e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
498927e0393fSCarsten Otte 
499027e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
499127e0393fSCarsten Otte 			r = -EFAULT;
499227e0393fSCarsten Otte 			break;
499327e0393fSCarsten Otte 		}
499427e0393fSCarsten Otte 
499527e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
499627e0393fSCarsten Otte 			r = -EINVAL;
499727e0393fSCarsten Otte 			break;
499827e0393fSCarsten Otte 		}
499927e0393fSCarsten Otte 
500027e0393fSCarsten Otte 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
500127e0393fSCarsten Otte 				     ucasmap.vcpu_addr, ucasmap.length);
500227e0393fSCarsten Otte 		break;
500327e0393fSCarsten Otte 	}
500427e0393fSCarsten Otte 	case KVM_S390_UCAS_UNMAP: {
500527e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
500627e0393fSCarsten Otte 
500727e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
500827e0393fSCarsten Otte 			r = -EFAULT;
500927e0393fSCarsten Otte 			break;
501027e0393fSCarsten Otte 		}
501127e0393fSCarsten Otte 
501227e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
501327e0393fSCarsten Otte 			r = -EINVAL;
501427e0393fSCarsten Otte 			break;
501527e0393fSCarsten Otte 		}
501627e0393fSCarsten Otte 
501727e0393fSCarsten Otte 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
501827e0393fSCarsten Otte 			ucasmap.length);
501927e0393fSCarsten Otte 		break;
502027e0393fSCarsten Otte 	}
502127e0393fSCarsten Otte #endif
5022ccc7910fSCarsten Otte 	case KVM_S390_VCPU_FAULT: {
5023527e30b4SMartin Schwidefsky 		r = gmap_fault(vcpu->arch.gmap, arg, 0);
5024ccc7910fSCarsten Otte 		break;
5025ccc7910fSCarsten Otte 	}
5026d6712df9SCornelia Huck 	case KVM_ENABLE_CAP:
5027d6712df9SCornelia Huck 	{
5028d6712df9SCornelia Huck 		struct kvm_enable_cap cap;
5029d6712df9SCornelia Huck 		r = -EFAULT;
5030d6712df9SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
5031d6712df9SCornelia Huck 			break;
5032d6712df9SCornelia Huck 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
5033d6712df9SCornelia Huck 		break;
5034d6712df9SCornelia Huck 	}
503541408c28SThomas Huth 	case KVM_S390_MEM_OP: {
503641408c28SThomas Huth 		struct kvm_s390_mem_op mem_op;
503741408c28SThomas Huth 
503841408c28SThomas Huth 		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
503919e12277SJanosch Frank 			r = kvm_s390_guest_memsida_op(vcpu, &mem_op);
504041408c28SThomas Huth 		else
504141408c28SThomas Huth 			r = -EFAULT;
504241408c28SThomas Huth 		break;
504341408c28SThomas Huth 	}
5044816c7667SJens Freimann 	case KVM_S390_SET_IRQ_STATE: {
5045816c7667SJens Freimann 		struct kvm_s390_irq_state irq_state;
5046816c7667SJens Freimann 
5047816c7667SJens Freimann 		r = -EFAULT;
5048816c7667SJens Freimann 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
5049816c7667SJens Freimann 			break;
5050816c7667SJens Freimann 		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
5051816c7667SJens Freimann 		    irq_state.len == 0 ||
5052816c7667SJens Freimann 		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
5053816c7667SJens Freimann 			r = -EINVAL;
5054816c7667SJens Freimann 			break;
5055816c7667SJens Freimann 		}
5056bb64da9aSChristian Borntraeger 		/* do not use irq_state.flags, it will break old QEMUs */
5057816c7667SJens Freimann 		r = kvm_s390_set_irq_state(vcpu,
5058816c7667SJens Freimann 					   (void __user *) irq_state.buf,
5059816c7667SJens Freimann 					   irq_state.len);
5060816c7667SJens Freimann 		break;
5061816c7667SJens Freimann 	}
5062816c7667SJens Freimann 	case KVM_S390_GET_IRQ_STATE: {
5063816c7667SJens Freimann 		struct kvm_s390_irq_state irq_state;
5064816c7667SJens Freimann 
5065816c7667SJens Freimann 		r = -EFAULT;
5066816c7667SJens Freimann 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
5067816c7667SJens Freimann 			break;
5068816c7667SJens Freimann 		if (irq_state.len == 0) {
5069816c7667SJens Freimann 			r = -EINVAL;
5070816c7667SJens Freimann 			break;
5071816c7667SJens Freimann 		}
5072bb64da9aSChristian Borntraeger 		/* do not use irq_state.flags, it will break old QEMUs */
5073816c7667SJens Freimann 		r = kvm_s390_get_irq_state(vcpu,
5074816c7667SJens Freimann 					   (__u8 __user *)  irq_state.buf,
5075816c7667SJens Freimann 					   irq_state.len);
5076816c7667SJens Freimann 		break;
5077816c7667SJens Freimann 	}
5078b0c632dbSHeiko Carstens 	default:
50793e6afcf1SCarsten Otte 		r = -ENOTTY;
5080b0c632dbSHeiko Carstens 	}
50819b062471SChristoffer Dall 
50829b062471SChristoffer Dall 	vcpu_put(vcpu);
5083bc923cc9SAvi Kivity 	return r;
5084b0c632dbSHeiko Carstens }
5085b0c632dbSHeiko Carstens 
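/*
 * For user-controlled (ucontrol) VMs, a fault on the vcpu fd at
 * KVM_S390_SIE_PAGE_OFFSET maps the vcpu's SIE control block into
 * userspace. All other faults get SIGBUS.
 */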
50861499fa80SSouptick Joarder vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
50875b1c1493SCarsten Otte {
50885b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
50895b1c1493SCarsten Otte 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
50905b1c1493SCarsten Otte 		 && (kvm_is_ucontrol(vcpu->kvm))) {
50915b1c1493SCarsten Otte 		vmf->page = virt_to_page(vcpu->arch.sie_block);
50925b1c1493SCarsten Otte 		get_page(vmf->page);
50935b1c1493SCarsten Otte 		return 0;
50945b1c1493SCarsten Otte 	}
50955b1c1493SCarsten Otte #endif
50965b1c1493SCarsten Otte 	return VM_FAULT_SIGBUS;
50975b1c1493SCarsten Otte }
50985b1c1493SCarsten Otte 
5099b0c632dbSHeiko Carstens /* Section: memory related */
5100f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
5101f7784b8eSMarcelo Tosatti 				   struct kvm_memory_slot *memslot,
510209170a49SPaolo Bonzini 				   const struct kvm_userspace_memory_region *mem,
51037b6195a9STakuya Yoshikawa 				   enum kvm_mr_change change)
5104b0c632dbSHeiko Carstens {
5105dd2887e7SNick Wang 	/* A few sanity checks. Memory slots have to start and end on a segment
5106dd2887e7SNick Wang 	   boundary (1 MB). The userland memory backing a slot may be fragmented
5107dd2887e7SNick Wang 	   across several different VMAs, and it is fine to mmap() and munmap()
5108dd2887e7SNick Wang 	   within this slot at any time after this call. */
5109b0c632dbSHeiko Carstens 
5110598841caSCarsten Otte 	if (mem->userspace_addr & 0xffffful)
5111b0c632dbSHeiko Carstens 		return -EINVAL;
5112b0c632dbSHeiko Carstens 
5113598841caSCarsten Otte 	if (mem->memory_size & 0xffffful)
5114b0c632dbSHeiko Carstens 		return -EINVAL;
5115b0c632dbSHeiko Carstens 
5116a3a92c31SDominik Dingel 	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
5117a3a92c31SDominik Dingel 		return -EINVAL;
5118a3a92c31SDominik Dingel 
511929b40f10SJanosch Frank 	/* When we are protected, we should not change the memory slots */
512029b40f10SJanosch Frank 	if (kvm_s390_pv_get_handle(kvm))
512129b40f10SJanosch Frank 		return -EINVAL;
5122f7784b8eSMarcelo Tosatti 	return 0;
5123f7784b8eSMarcelo Tosatti }
5124f7784b8eSMarcelo Tosatti 
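/*
 * Mirror the memslot change in the guest mapping (gmap): unmap the old
 * segments on DELETE/MOVE and map the new range on MOVE/CREATE; a
 * flags-only change needs no gmap update.
 */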
5125f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
512609170a49SPaolo Bonzini 				const struct kvm_userspace_memory_region *mem,
51279d4c197cSSean Christopherson 				struct kvm_memory_slot *old,
5128f36f3f28SPaolo Bonzini 				const struct kvm_memory_slot *new,
51298482644aSTakuya Yoshikawa 				enum kvm_mr_change change)
5130f7784b8eSMarcelo Tosatti {
513119ec166cSChristian Borntraeger 	int rc = 0;
5132f7784b8eSMarcelo Tosatti 
513319ec166cSChristian Borntraeger 	switch (change) {
513419ec166cSChristian Borntraeger 	case KVM_MR_DELETE:
513519ec166cSChristian Borntraeger 		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
513619ec166cSChristian Borntraeger 					old->npages * PAGE_SIZE);
513719ec166cSChristian Borntraeger 		break;
513819ec166cSChristian Borntraeger 	case KVM_MR_MOVE:
513919ec166cSChristian Borntraeger 		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
514019ec166cSChristian Borntraeger 					old->npages * PAGE_SIZE);
514119ec166cSChristian Borntraeger 		if (rc)
514219ec166cSChristian Borntraeger 			break;
51433b684a42SJoe Perches 		fallthrough;
514419ec166cSChristian Borntraeger 	case KVM_MR_CREATE:
5145598841caSCarsten Otte 		rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
5146598841caSCarsten Otte 				      mem->guest_phys_addr, mem->memory_size);
514719ec166cSChristian Borntraeger 		break;
514819ec166cSChristian Borntraeger 	case KVM_MR_FLAGS_ONLY:
514919ec166cSChristian Borntraeger 		break;
515019ec166cSChristian Borntraeger 	default:
515119ec166cSChristian Borntraeger 		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
515219ec166cSChristian Borntraeger 	}
5153598841caSCarsten Otte 	if (rc)
5154ea2cdd27SDavid Hildenbrand 		pr_warn("failed to commit memory region\n");
5155598841caSCarsten Otte 	return;
5156b0c632dbSHeiko Carstens }
5157b0c632dbSHeiko Carstens 
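/*
 * Mask applied to doubleword i of the host facility list before it is
 * used as a guest base facility. sclp.hmfai provides a 2-bit field per
 * doubleword; each increment of that field drops another 16 facility
 * bits from the mask (the name suggests those bits belong to facilities
 * reserved to the hypervisor).
 */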
515860a37709SAlexander Yarygin static inline unsigned long nonhyp_mask(int i)
515960a37709SAlexander Yarygin {
516060a37709SAlexander Yarygin 	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
516160a37709SAlexander Yarygin 
516260a37709SAlexander Yarygin 	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
516360a37709SAlexander Yarygin }
516460a37709SAlexander Yarygin 
51653491caf2SChristian Borntraeger void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
51663491caf2SChristian Borntraeger {
51673491caf2SChristian Borntraeger 	vcpu->valid_wakeup = false;
51683491caf2SChristian Borntraeger }
51693491caf2SChristian Borntraeger 
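/*
 * Module init: SIE must be available (sclp.has_sief2), and nested
 * virtualization cannot be combined with huge page backing. The host
 * facility list, filtered through nonhyp_mask(), seeds the base
 * facilities offered to guests before kvm_init() is called.
 */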
5170b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
5171b0c632dbSHeiko Carstens {
517260a37709SAlexander Yarygin 	int i;
517360a37709SAlexander Yarygin 
517407197fd0SDavid Hildenbrand 	if (!sclp.has_sief2) {
51758d43d570SMichael Mueller 		pr_info("SIE is not available\n");
517607197fd0SDavid Hildenbrand 		return -ENODEV;
517707197fd0SDavid Hildenbrand 	}
517807197fd0SDavid Hildenbrand 
5179a4499382SJanosch Frank 	if (nested && hpage) {
51808d43d570SMichael Mueller 		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
5181a4499382SJanosch Frank 		return -EINVAL;
5182a4499382SJanosch Frank 	}
5183a4499382SJanosch Frank 
518460a37709SAlexander Yarygin 	for (i = 0; i < 16; i++)
5185c3b9e3e1SChristian Borntraeger 		kvm_s390_fac_base[i] |=
518660a37709SAlexander Yarygin 			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
518760a37709SAlexander Yarygin 
51889d8d5786SMichael Mueller 	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
5189b0c632dbSHeiko Carstens }
5190b0c632dbSHeiko Carstens 
5191b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void)
5192b0c632dbSHeiko Carstens {
5193b0c632dbSHeiko Carstens 	kvm_exit();
5194b0c632dbSHeiko Carstens }
5195b0c632dbSHeiko Carstens 
5196b0c632dbSHeiko Carstens module_init(kvm_s390_init);
5197b0c632dbSHeiko Carstens module_exit(kvm_s390_exit);
5198566af940SCornelia Huck 
5199566af940SCornelia Huck /*
5200566af940SCornelia Huck  * Enable autoloading of the kvm module.
5201566af940SCornelia Huck  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
5202566af940SCornelia Huck  * since x86 takes a different approach.
5203566af940SCornelia Huck  */
5204566af940SCornelia Huck #include <linux/miscdevice.h>
5205566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR);
5206566af940SCornelia Huck MODULE_ALIAS("devname:kvm");
5207