xref: /openbmc/linux/arch/s390/kvm/kvm-s390.c (revision fdf036507f1fc036d5a06753e9e8b13f46de73e8)
1b0c632dbSHeiko Carstens /*
2a53c8fabSHeiko Carstens  * hosting zSeries kernel virtual machines
3b0c632dbSHeiko Carstens  *
4628eb9b8SChristian Ehrhardt  * Copyright IBM Corp. 2008, 2009
5b0c632dbSHeiko Carstens  *
6b0c632dbSHeiko Carstens  * This program is free software; you can redistribute it and/or modify
7b0c632dbSHeiko Carstens  * it under the terms of the GNU General Public License (version 2 only)
8b0c632dbSHeiko Carstens  * as published by the Free Software Foundation.
9b0c632dbSHeiko Carstens  *
10b0c632dbSHeiko Carstens  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11b0c632dbSHeiko Carstens  *               Christian Borntraeger <borntraeger@de.ibm.com>
12b0c632dbSHeiko Carstens  *               Heiko Carstens <heiko.carstens@de.ibm.com>
13628eb9b8SChristian Ehrhardt  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
1415f36ebdSJason J. Herne  *               Jason J. Herne <jjherne@us.ibm.com>
15b0c632dbSHeiko Carstens  */
16b0c632dbSHeiko Carstens 
17b0c632dbSHeiko Carstens #include <linux/compiler.h>
18b0c632dbSHeiko Carstens #include <linux/err.h>
19b0c632dbSHeiko Carstens #include <linux/fs.h>
20ca872302SChristian Borntraeger #include <linux/hrtimer.h>
21b0c632dbSHeiko Carstens #include <linux/init.h>
22b0c632dbSHeiko Carstens #include <linux/kvm.h>
23b0c632dbSHeiko Carstens #include <linux/kvm_host.h>
24b0c632dbSHeiko Carstens #include <linux/module.h>
25a374e892STony Krowiak #include <linux/random.h>
26b0c632dbSHeiko Carstens #include <linux/slab.h>
27ba5c1e9bSCarsten Otte #include <linux/timer.h>
2841408c28SThomas Huth #include <linux/vmalloc.h>
29cbb870c8SHeiko Carstens #include <asm/asm-offsets.h>
30b0c632dbSHeiko Carstens #include <asm/lowcore.h>
31*fdf03650SFan Zhang #include <asm/etr.h>
32b0c632dbSHeiko Carstens #include <asm/pgtable.h>
33f5daba1dSHeiko Carstens #include <asm/nmi.h>
34a0616cdeSDavid Howells #include <asm/switch_to.h>
356d3da241SJens Freimann #include <asm/isc.h>
361526bf9cSChristian Borntraeger #include <asm/sclp.h>
378f2abe6aSChristian Borntraeger #include "kvm-s390.h"
38b0c632dbSHeiko Carstens #include "gaccess.h"
39b0c632dbSHeiko Carstens 
40ea2cdd27SDavid Hildenbrand #define KMSG_COMPONENT "kvm-s390"
41ea2cdd27SDavid Hildenbrand #undef pr_fmt
42ea2cdd27SDavid Hildenbrand #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
43ea2cdd27SDavid Hildenbrand 
445786fffaSCornelia Huck #define CREATE_TRACE_POINTS
455786fffaSCornelia Huck #include "trace.h"
46ade38c31SCornelia Huck #include "trace-s390.h"
475786fffaSCornelia Huck 
4841408c28SThomas Huth #define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
49816c7667SJens Freimann #define LOCAL_IRQS 32
50816c7667SJens Freimann #define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
51816c7667SJens Freimann 			   (KVM_MAX_VCPUS + LOCAL_IRQS))
5241408c28SThomas Huth 
53b0c632dbSHeiko Carstens #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
54b0c632dbSHeiko Carstens 
55b0c632dbSHeiko Carstens struct kvm_stats_debugfs_item debugfs_entries[] = {
56b0c632dbSHeiko Carstens 	{ "userspace_handled", VCPU_STAT(exit_userspace) },
570eaeafa1SChristian Borntraeger 	{ "exit_null", VCPU_STAT(exit_null) },
588f2abe6aSChristian Borntraeger 	{ "exit_validity", VCPU_STAT(exit_validity) },
598f2abe6aSChristian Borntraeger 	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
608f2abe6aSChristian Borntraeger 	{ "exit_external_request", VCPU_STAT(exit_external_request) },
618f2abe6aSChristian Borntraeger 	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
62ba5c1e9bSCarsten Otte 	{ "exit_instruction", VCPU_STAT(exit_instruction) },
63ba5c1e9bSCarsten Otte 	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
64ba5c1e9bSCarsten Otte 	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
65f7819512SPaolo Bonzini 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
66ce2e4f0bSDavid Hildenbrand 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
67f5e10b09SChristian Borntraeger 	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
68ba5c1e9bSCarsten Otte 	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
69aba07508SDavid Hildenbrand 	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
70aba07508SDavid Hildenbrand 	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
71ba5c1e9bSCarsten Otte 	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
727697e71fSChristian Ehrhardt 	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
73ba5c1e9bSCarsten Otte 	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
74ba5c1e9bSCarsten Otte 	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
75ba5c1e9bSCarsten Otte 	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
76ba5c1e9bSCarsten Otte 	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
77ba5c1e9bSCarsten Otte 	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
78ba5c1e9bSCarsten Otte 	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
79ba5c1e9bSCarsten Otte 	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
8069d0d3a3SChristian Borntraeger 	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
81453423dcSChristian Borntraeger 	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
82453423dcSChristian Borntraeger 	{ "instruction_spx", VCPU_STAT(instruction_spx) },
83453423dcSChristian Borntraeger 	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
84453423dcSChristian Borntraeger 	{ "instruction_stap", VCPU_STAT(instruction_stap) },
85453423dcSChristian Borntraeger 	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
868a242234SHeiko Carstens 	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
87453423dcSChristian Borntraeger 	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
88453423dcSChristian Borntraeger 	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
89b31288faSKonstantin Weitz 	{ "instruction_essa", VCPU_STAT(instruction_essa) },
90453423dcSChristian Borntraeger 	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
91453423dcSChristian Borntraeger 	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
92bb25b9baSChristian Borntraeger 	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
935288fbf0SChristian Borntraeger 	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
94bd59d3a4SCornelia Huck 	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
957697e71fSChristian Ehrhardt 	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
965288fbf0SChristian Borntraeger 	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
9742cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
9842cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
995288fbf0SChristian Borntraeger 	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
10042cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
10142cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
102cd7b4b61SEric Farman 	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
1035288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
1045288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
1055288fbf0SChristian Borntraeger 	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
10642cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
10742cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
10842cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
109388186bcSChristian Borntraeger 	{ "diagnose_10", VCPU_STAT(diagnose_10) },
110e28acfeaSChristian Borntraeger 	{ "diagnose_44", VCPU_STAT(diagnose_44) },
11141628d33SKonstantin Weitz 	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
112175a5c9eSChristian Borntraeger 	{ "diagnose_258", VCPU_STAT(diagnose_258) },
113175a5c9eSChristian Borntraeger 	{ "diagnose_308", VCPU_STAT(diagnose_308) },
114175a5c9eSChristian Borntraeger 	{ "diagnose_500", VCPU_STAT(diagnose_500) },
115b0c632dbSHeiko Carstens 	{ NULL }
116b0c632dbSHeiko Carstens };
117b0c632dbSHeiko Carstens 
1189d8d5786SMichael Mueller /* upper limit of facility bits that kvm may expose to the guest */
1199d8d5786SMichael Mueller unsigned long kvm_s390_fac_list_mask[] = {
120a3ed8daeSChristian Borntraeger 	0xffe6fffbfcfdfc40UL,
12153df84f8SGuenther Hutzl 	0x005e800000000000UL,
1229d8d5786SMichael Mueller };
123b0c632dbSHeiko Carstens 
1249d8d5786SMichael Mueller unsigned long kvm_s390_fac_list_mask_size(void)
12578c4b59fSMichael Mueller {
1269d8d5786SMichael Mueller 	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
1279d8d5786SMichael Mueller 	return ARRAY_SIZE(kvm_s390_fac_list_mask);
12878c4b59fSMichael Mueller }
12978c4b59fSMichael Mueller 
1309d8d5786SMichael Mueller static struct gmap_notifier gmap_notifier;
13178f26131SChristian Borntraeger debug_info_t *kvm_s390_dbf;
1329d8d5786SMichael Mueller 
133b0c632dbSHeiko Carstens /* Section: not file related */
13413a34e06SRadim Krčmář int kvm_arch_hardware_enable(void)
135b0c632dbSHeiko Carstens {
136b0c632dbSHeiko Carstens 	/* every s390 is virtualization enabled ;-) */
13710474ae8SAlexander Graf 	return 0;
138b0c632dbSHeiko Carstens }
139b0c632dbSHeiko Carstens 
1402c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
1412c70fe44SChristian Borntraeger 
142*fdf03650SFan Zhang /*
143*fdf03650SFan Zhang  * This callback is executed during stop_machine(). All CPUs are therefore
144*fdf03650SFan Zhang  * temporarily stopped. In order not to change guest behavior, we have to
145*fdf03650SFan Zhang  * disable preemption whenever we touch the epoch of kvm and the VCPUs,
146*fdf03650SFan Zhang  * so a CPU won't be stopped while calculating with the epoch.
147*fdf03650SFan Zhang  */
148*fdf03650SFan Zhang static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
149*fdf03650SFan Zhang 			  void *v)
150*fdf03650SFan Zhang {
151*fdf03650SFan Zhang 	struct kvm *kvm;
152*fdf03650SFan Zhang 	struct kvm_vcpu *vcpu;
153*fdf03650SFan Zhang 	int i;
154*fdf03650SFan Zhang 	unsigned long long *delta = v;
155*fdf03650SFan Zhang 
156*fdf03650SFan Zhang 	list_for_each_entry(kvm, &vm_list, vm_list) {
157*fdf03650SFan Zhang 		kvm->arch.epoch -= *delta;
158*fdf03650SFan Zhang 		kvm_for_each_vcpu(i, vcpu, kvm) {
159*fdf03650SFan Zhang 			vcpu->arch.sie_block->epoch -= *delta;
160*fdf03650SFan Zhang 		}
161*fdf03650SFan Zhang 	}
162*fdf03650SFan Zhang 	return NOTIFY_OK;
163*fdf03650SFan Zhang }
164*fdf03650SFan Zhang 
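/*
 * Explanatory note (added for illustration, not part of the original code):
 * the guest-observed TOD is effectively the host TOD plus the epoch stored
 * in each SIE block. When a clock-sync event shifts the host TOD by *delta,
 * subtracting the same delta from every epoch above keeps the guest clock
 * steady. For example, host TOD 1000 with epoch -200 means the guest sees
 * 800; after a host jump of +50 the host TOD is 1050, the epoch becomes
 * -250, and the guest still sees 800.
 */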
165*fdf03650SFan Zhang static struct notifier_block kvm_clock_notifier = {
166*fdf03650SFan Zhang 	.notifier_call = kvm_clock_sync,
167*fdf03650SFan Zhang };
168*fdf03650SFan Zhang 
169b0c632dbSHeiko Carstens int kvm_arch_hardware_setup(void)
170b0c632dbSHeiko Carstens {
1712c70fe44SChristian Borntraeger 	gmap_notifier.notifier_call = kvm_gmap_notifier;
1722c70fe44SChristian Borntraeger 	gmap_register_ipte_notifier(&gmap_notifier);
173*fdf03650SFan Zhang 	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
174*fdf03650SFan Zhang 				       &kvm_clock_notifier);
175b0c632dbSHeiko Carstens 	return 0;
176b0c632dbSHeiko Carstens }
177b0c632dbSHeiko Carstens 
178b0c632dbSHeiko Carstens void kvm_arch_hardware_unsetup(void)
179b0c632dbSHeiko Carstens {
1802c70fe44SChristian Borntraeger 	gmap_unregister_ipte_notifier(&gmap_notifier);
181*fdf03650SFan Zhang 	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
182*fdf03650SFan Zhang 					 &kvm_clock_notifier);
183b0c632dbSHeiko Carstens }
184b0c632dbSHeiko Carstens 
185b0c632dbSHeiko Carstens int kvm_arch_init(void *opaque)
186b0c632dbSHeiko Carstens {
18778f26131SChristian Borntraeger 	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
18878f26131SChristian Borntraeger 	if (!kvm_s390_dbf)
18978f26131SChristian Borntraeger 		return -ENOMEM;
19078f26131SChristian Borntraeger 
19178f26131SChristian Borntraeger 	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
19278f26131SChristian Borntraeger 		debug_unregister(kvm_s390_dbf);
19378f26131SChristian Borntraeger 		return -ENOMEM;
19478f26131SChristian Borntraeger 	}
19578f26131SChristian Borntraeger 
19684877d93SCornelia Huck 	/* Register floating interrupt controller interface. */
19784877d93SCornelia Huck 	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
198b0c632dbSHeiko Carstens }
199b0c632dbSHeiko Carstens 
20078f26131SChristian Borntraeger void kvm_arch_exit(void)
20178f26131SChristian Borntraeger {
20278f26131SChristian Borntraeger 	debug_unregister(kvm_s390_dbf);
20378f26131SChristian Borntraeger }
20478f26131SChristian Borntraeger 
205b0c632dbSHeiko Carstens /* Section: device related */
206b0c632dbSHeiko Carstens long kvm_arch_dev_ioctl(struct file *filp,
207b0c632dbSHeiko Carstens 			unsigned int ioctl, unsigned long arg)
208b0c632dbSHeiko Carstens {
209b0c632dbSHeiko Carstens 	if (ioctl == KVM_S390_ENABLE_SIE)
210b0c632dbSHeiko Carstens 		return s390_enable_sie();
211b0c632dbSHeiko Carstens 	return -EINVAL;
212b0c632dbSHeiko Carstens }
213b0c632dbSHeiko Carstens 
214784aa3d7SAlexander Graf int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
215b0c632dbSHeiko Carstens {
216d7b0b5ebSCarsten Otte 	int r;
217d7b0b5ebSCarsten Otte 
2182bd0ac4eSCarsten Otte 	switch (ext) {
219d7b0b5ebSCarsten Otte 	case KVM_CAP_S390_PSW:
220b6cf8788SChristian Borntraeger 	case KVM_CAP_S390_GMAP:
22152e16b18SChristian Borntraeger 	case KVM_CAP_SYNC_MMU:
2221efd0f59SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
2231efd0f59SCarsten Otte 	case KVM_CAP_S390_UCONTROL:
2241efd0f59SCarsten Otte #endif
2253c038e6bSDominik Dingel 	case KVM_CAP_ASYNC_PF:
22660b413c9SChristian Borntraeger 	case KVM_CAP_SYNC_REGS:
22714eebd91SCarsten Otte 	case KVM_CAP_ONE_REG:
228d6712df9SCornelia Huck 	case KVM_CAP_ENABLE_CAP:
229fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
23010ccaa1eSCornelia Huck 	case KVM_CAP_IOEVENTFD:
231c05c4186SJens Freimann 	case KVM_CAP_DEVICE_CTRL:
232d938dc55SCornelia Huck 	case KVM_CAP_ENABLE_CAP_VM:
23378599d90SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
234f2061656SDominik Dingel 	case KVM_CAP_VM_ATTRIBUTES:
2356352e4d2SDavid Hildenbrand 	case KVM_CAP_MP_STATE:
23647b43c52SJens Freimann 	case KVM_CAP_S390_INJECT_IRQ:
2372444b352SDavid Hildenbrand 	case KVM_CAP_S390_USER_SIGP:
238e44fc8c9SEkaterina Tumanova 	case KVM_CAP_S390_USER_STSI:
23930ee2a98SJason J. Herne 	case KVM_CAP_S390_SKEYS:
240816c7667SJens Freimann 	case KVM_CAP_S390_IRQ_STATE:
241d7b0b5ebSCarsten Otte 		r = 1;
242d7b0b5ebSCarsten Otte 		break;
24341408c28SThomas Huth 	case KVM_CAP_S390_MEM_OP:
24441408c28SThomas Huth 		r = MEM_OP_MAX_SIZE;
24541408c28SThomas Huth 		break;
246e726b1bdSChristian Borntraeger 	case KVM_CAP_NR_VCPUS:
247e726b1bdSChristian Borntraeger 	case KVM_CAP_MAX_VCPUS:
248e726b1bdSChristian Borntraeger 		r = KVM_MAX_VCPUS;
249e726b1bdSChristian Borntraeger 		break;
250e1e2e605SNick Wang 	case KVM_CAP_NR_MEMSLOTS:
251e1e2e605SNick Wang 		r = KVM_USER_MEM_SLOTS;
252e1e2e605SNick Wang 		break;
2531526bf9cSChristian Borntraeger 	case KVM_CAP_S390_COW:
254abf09bedSMartin Schwidefsky 		r = MACHINE_HAS_ESOP;
2551526bf9cSChristian Borntraeger 		break;
25668c55750SEric Farman 	case KVM_CAP_S390_VECTOR_REGISTERS:
25768c55750SEric Farman 		r = MACHINE_HAS_VX;
25868c55750SEric Farman 		break;
2592bd0ac4eSCarsten Otte 	default:
260d7b0b5ebSCarsten Otte 		r = 0;
261b0c632dbSHeiko Carstens 	}
262d7b0b5ebSCarsten Otte 	return r;
2632bd0ac4eSCarsten Otte }
264b0c632dbSHeiko Carstens 
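/*
 * Illustrative userspace sketch (not part of this file): the capabilities
 * reported above can be probed with KVM_CHECK_EXTENSION on the VM fd. The
 * helper name below is an assumption made for the example; the ioctl and
 * the capability constant come from <linux/kvm.h>.
 *
 *	int probe_s390_mem_op(int vm_fd)
 *	{
 *		int max = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_MEM_OP);
 *
 *		// > 0: maximum transfer size (MEM_OP_MAX_SIZE), 0: unsupported
 *		return max > 0 ? max : 0;
 *	}
 */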
26515f36ebdSJason J. Herne static void kvm_s390_sync_dirty_log(struct kvm *kvm,
26615f36ebdSJason J. Herne 					struct kvm_memory_slot *memslot)
26715f36ebdSJason J. Herne {
26815f36ebdSJason J. Herne 	gfn_t cur_gfn, last_gfn;
26915f36ebdSJason J. Herne 	unsigned long address;
27015f36ebdSJason J. Herne 	struct gmap *gmap = kvm->arch.gmap;
27115f36ebdSJason J. Herne 
27215f36ebdSJason J. Herne 	down_read(&gmap->mm->mmap_sem);
27315f36ebdSJason J. Herne 	/* Loop over all guest pages */
27415f36ebdSJason J. Herne 	last_gfn = memslot->base_gfn + memslot->npages;
27515f36ebdSJason J. Herne 	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
27615f36ebdSJason J. Herne 		address = gfn_to_hva_memslot(memslot, cur_gfn);
27715f36ebdSJason J. Herne 
27815f36ebdSJason J. Herne 		if (gmap_test_and_clear_dirty(address, gmap))
27915f36ebdSJason J. Herne 			mark_page_dirty(kvm, cur_gfn);
28015f36ebdSJason J. Herne 	}
28115f36ebdSJason J. Herne 	up_read(&gmap->mm->mmap_sem);
28215f36ebdSJason J. Herne }
28315f36ebdSJason J. Herne 
284b0c632dbSHeiko Carstens /* Section: vm related */
285b0c632dbSHeiko Carstens /*
286b0c632dbSHeiko Carstens  * Get (and clear) the dirty memory log for a memory slot.
287b0c632dbSHeiko Carstens  */
288b0c632dbSHeiko Carstens int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
289b0c632dbSHeiko Carstens 			       struct kvm_dirty_log *log)
290b0c632dbSHeiko Carstens {
29115f36ebdSJason J. Herne 	int r;
29215f36ebdSJason J. Herne 	unsigned long n;
2939f6b8029SPaolo Bonzini 	struct kvm_memslots *slots;
29415f36ebdSJason J. Herne 	struct kvm_memory_slot *memslot;
29515f36ebdSJason J. Herne 	int is_dirty = 0;
29615f36ebdSJason J. Herne 
29715f36ebdSJason J. Herne 	mutex_lock(&kvm->slots_lock);
29815f36ebdSJason J. Herne 
29915f36ebdSJason J. Herne 	r = -EINVAL;
30015f36ebdSJason J. Herne 	if (log->slot >= KVM_USER_MEM_SLOTS)
30115f36ebdSJason J. Herne 		goto out;
30215f36ebdSJason J. Herne 
3039f6b8029SPaolo Bonzini 	slots = kvm_memslots(kvm);
3049f6b8029SPaolo Bonzini 	memslot = id_to_memslot(slots, log->slot);
30515f36ebdSJason J. Herne 	r = -ENOENT;
30615f36ebdSJason J. Herne 	if (!memslot->dirty_bitmap)
30715f36ebdSJason J. Herne 		goto out;
30815f36ebdSJason J. Herne 
30915f36ebdSJason J. Herne 	kvm_s390_sync_dirty_log(kvm, memslot);
31015f36ebdSJason J. Herne 	r = kvm_get_dirty_log(kvm, log, &is_dirty);
31115f36ebdSJason J. Herne 	if (r)
31215f36ebdSJason J. Herne 		goto out;
31315f36ebdSJason J. Herne 
31415f36ebdSJason J. Herne 	/* Clear the dirty log */
31515f36ebdSJason J. Herne 	if (is_dirty) {
31615f36ebdSJason J. Herne 		n = kvm_dirty_bitmap_bytes(memslot);
31715f36ebdSJason J. Herne 		memset(memslot->dirty_bitmap, 0, n);
31815f36ebdSJason J. Herne 	}
31915f36ebdSJason J. Herne 	r = 0;
32015f36ebdSJason J. Herne out:
32115f36ebdSJason J. Herne 	mutex_unlock(&kvm->slots_lock);
32215f36ebdSJason J. Herne 	return r;
323b0c632dbSHeiko Carstens }
324b0c632dbSHeiko Carstens 
325d938dc55SCornelia Huck static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
326d938dc55SCornelia Huck {
327d938dc55SCornelia Huck 	int r;
328d938dc55SCornelia Huck 
329d938dc55SCornelia Huck 	if (cap->flags)
330d938dc55SCornelia Huck 		return -EINVAL;
331d938dc55SCornelia Huck 
332d938dc55SCornelia Huck 	switch (cap->cap) {
33384223598SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
334c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
33584223598SCornelia Huck 		kvm->arch.use_irqchip = 1;
33684223598SCornelia Huck 		r = 0;
33784223598SCornelia Huck 		break;
3382444b352SDavid Hildenbrand 	case KVM_CAP_S390_USER_SIGP:
339c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
3402444b352SDavid Hildenbrand 		kvm->arch.user_sigp = 1;
3412444b352SDavid Hildenbrand 		r = 0;
3422444b352SDavid Hildenbrand 		break;
34368c55750SEric Farman 	case KVM_CAP_S390_VECTOR_REGISTERS:
34418280d8bSMichael Mueller 		if (MACHINE_HAS_VX) {
34518280d8bSMichael Mueller 			set_kvm_facility(kvm->arch.model.fac->mask, 129);
34618280d8bSMichael Mueller 			set_kvm_facility(kvm->arch.model.fac->list, 129);
34718280d8bSMichael Mueller 			r = 0;
34818280d8bSMichael Mueller 		} else
34918280d8bSMichael Mueller 			r = -EINVAL;
350c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
351c92ea7b9SChristian Borntraeger 			 r ? "(not available)" : "(success)");
35268c55750SEric Farman 		break;
353e44fc8c9SEkaterina Tumanova 	case KVM_CAP_S390_USER_STSI:
354c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
355e44fc8c9SEkaterina Tumanova 		kvm->arch.user_stsi = 1;
356e44fc8c9SEkaterina Tumanova 		r = 0;
357e44fc8c9SEkaterina Tumanova 		break;
358d938dc55SCornelia Huck 	default:
359d938dc55SCornelia Huck 		r = -EINVAL;
360d938dc55SCornelia Huck 		break;
361d938dc55SCornelia Huck 	}
362d938dc55SCornelia Huck 	return r;
363d938dc55SCornelia Huck }
364d938dc55SCornelia Huck 
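/*
 * Illustrative userspace sketch (not part of this file): enabling one of the
 * VM capabilities handled above. struct kvm_enable_cap and KVM_ENABLE_CAP
 * are part of the KVM ABI in <linux/kvm.h>; the helper name is an assumption.
 *
 *	int enable_user_sigp(int vm_fd)
 *	{
 *		struct kvm_enable_cap cap = {
 *			.cap = KVM_CAP_S390_USER_SIGP,
 *			// flags and args stay zero; nonzero flags are rejected
 *		};
 *
 *		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 *	}
 */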
3658c0a7ce6SDominik Dingel static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
3668c0a7ce6SDominik Dingel {
3678c0a7ce6SDominik Dingel 	int ret;
3688c0a7ce6SDominik Dingel 
3698c0a7ce6SDominik Dingel 	switch (attr->attr) {
3708c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE:
3718c0a7ce6SDominik Dingel 		ret = 0;
372c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
373c92ea7b9SChristian Borntraeger 			 kvm->arch.gmap->asce_end);
3748c0a7ce6SDominik Dingel 		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
3758c0a7ce6SDominik Dingel 			ret = -EFAULT;
3768c0a7ce6SDominik Dingel 		break;
3778c0a7ce6SDominik Dingel 	default:
3788c0a7ce6SDominik Dingel 		ret = -ENXIO;
3798c0a7ce6SDominik Dingel 		break;
3808c0a7ce6SDominik Dingel 	}
3818c0a7ce6SDominik Dingel 	return ret;
3828c0a7ce6SDominik Dingel }
3838c0a7ce6SDominik Dingel 
3848c0a7ce6SDominik Dingel static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
3854f718eabSDominik Dingel {
3864f718eabSDominik Dingel 	int ret;
3874f718eabSDominik Dingel 	unsigned int idx;
3884f718eabSDominik Dingel 	switch (attr->attr) {
3894f718eabSDominik Dingel 	case KVM_S390_VM_MEM_ENABLE_CMMA:
390e6db1d61SDominik Dingel 		/* enable CMMA only for z10 and later (EDAT_1) */
391e6db1d61SDominik Dingel 		ret = -EINVAL;
392e6db1d61SDominik Dingel 		if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
393e6db1d61SDominik Dingel 			break;
394e6db1d61SDominik Dingel 
3954f718eabSDominik Dingel 		ret = -EBUSY;
396c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
3974f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
3984f718eabSDominik Dingel 		if (atomic_read(&kvm->online_vcpus) == 0) {
3994f718eabSDominik Dingel 			kvm->arch.use_cmma = 1;
4004f718eabSDominik Dingel 			ret = 0;
4014f718eabSDominik Dingel 		}
4024f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
4034f718eabSDominik Dingel 		break;
4044f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CLR_CMMA:
405c3489155SDominik Dingel 		ret = -EINVAL;
406c3489155SDominik Dingel 		if (!kvm->arch.use_cmma)
407c3489155SDominik Dingel 			break;
408c3489155SDominik Dingel 
409c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
4104f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
4114f718eabSDominik Dingel 		idx = srcu_read_lock(&kvm->srcu);
412a13cff31SDominik Dingel 		s390_reset_cmma(kvm->arch.gmap->mm);
4134f718eabSDominik Dingel 		srcu_read_unlock(&kvm->srcu, idx);
4144f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
4154f718eabSDominik Dingel 		ret = 0;
4164f718eabSDominik Dingel 		break;
4178c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE: {
4188c0a7ce6SDominik Dingel 		unsigned long new_limit;
4198c0a7ce6SDominik Dingel 
4208c0a7ce6SDominik Dingel 		if (kvm_is_ucontrol(kvm))
4218c0a7ce6SDominik Dingel 			return -EINVAL;
4228c0a7ce6SDominik Dingel 
4238c0a7ce6SDominik Dingel 		if (get_user(new_limit, (u64 __user *)attr->addr))
4248c0a7ce6SDominik Dingel 			return -EFAULT;
4258c0a7ce6SDominik Dingel 
4268c0a7ce6SDominik Dingel 		if (new_limit > kvm->arch.gmap->asce_end)
4278c0a7ce6SDominik Dingel 			return -E2BIG;
4288c0a7ce6SDominik Dingel 
4298c0a7ce6SDominik Dingel 		ret = -EBUSY;
4308c0a7ce6SDominik Dingel 		mutex_lock(&kvm->lock);
4318c0a7ce6SDominik Dingel 		if (atomic_read(&kvm->online_vcpus) == 0) {
4328c0a7ce6SDominik Dingel 			/* gmap_alloc will round the limit up */
4338c0a7ce6SDominik Dingel 			struct gmap *new = gmap_alloc(current->mm, new_limit);
4348c0a7ce6SDominik Dingel 
4358c0a7ce6SDominik Dingel 			if (!new) {
4368c0a7ce6SDominik Dingel 				ret = -ENOMEM;
4378c0a7ce6SDominik Dingel 			} else {
4388c0a7ce6SDominik Dingel 				gmap_free(kvm->arch.gmap);
4398c0a7ce6SDominik Dingel 				new->private = kvm;
4408c0a7ce6SDominik Dingel 				kvm->arch.gmap = new;
4418c0a7ce6SDominik Dingel 				ret = 0;
4428c0a7ce6SDominik Dingel 			}
4438c0a7ce6SDominik Dingel 		}
4448c0a7ce6SDominik Dingel 		mutex_unlock(&kvm->lock);
445c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "SET: max guest memory: %lu bytes", new_limit);
4468c0a7ce6SDominik Dingel 		break;
4478c0a7ce6SDominik Dingel 	}
4484f718eabSDominik Dingel 	default:
4494f718eabSDominik Dingel 		ret = -ENXIO;
4504f718eabSDominik Dingel 		break;
4514f718eabSDominik Dingel 	}
4524f718eabSDominik Dingel 	return ret;
4534f718eabSDominik Dingel }
4544f718eabSDominik Dingel 
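/*
 * Illustrative userspace sketch (not part of this file): setting the guest
 * memory limit through the attribute group handled above. The constants and
 * struct kvm_device_attr come from <linux/kvm.h>; the helper name is an
 * assumption. This only succeeds before the first VCPU is created, otherwise
 * the handler above returns -EBUSY.
 *
 *	int set_guest_mem_limit(int vm_fd, __u64 limit)
 *	{
 *		struct kvm_device_attr attr = {
 *			.group = KVM_S390_VM_MEM_CTRL,
 *			.attr  = KVM_S390_VM_MEM_LIMIT_SIZE,
 *			.addr  = (__u64)(unsigned long)&limit,
 *		};
 *
 *		return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *	}
 */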
455a374e892STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
456a374e892STony Krowiak 
457a374e892STony Krowiak static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
458a374e892STony Krowiak {
459a374e892STony Krowiak 	struct kvm_vcpu *vcpu;
460a374e892STony Krowiak 	int i;
461a374e892STony Krowiak 
4629d8d5786SMichael Mueller 	if (!test_kvm_facility(kvm, 76))
463a374e892STony Krowiak 		return -EINVAL;
464a374e892STony Krowiak 
465a374e892STony Krowiak 	mutex_lock(&kvm->lock);
466a374e892STony Krowiak 	switch (attr->attr) {
467a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
468a374e892STony Krowiak 		get_random_bytes(
469a374e892STony Krowiak 			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
470a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
471a374e892STony Krowiak 		kvm->arch.crypto.aes_kw = 1;
472c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
473a374e892STony Krowiak 		break;
474a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
475a374e892STony Krowiak 		get_random_bytes(
476a374e892STony Krowiak 			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
477a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
478a374e892STony Krowiak 		kvm->arch.crypto.dea_kw = 1;
479c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
480a374e892STony Krowiak 		break;
481a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
482a374e892STony Krowiak 		kvm->arch.crypto.aes_kw = 0;
483a374e892STony Krowiak 		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
484a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
485c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
486a374e892STony Krowiak 		break;
487a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
488a374e892STony Krowiak 		kvm->arch.crypto.dea_kw = 0;
489a374e892STony Krowiak 		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
490a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
491c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
492a374e892STony Krowiak 		break;
493a374e892STony Krowiak 	default:
494a374e892STony Krowiak 		mutex_unlock(&kvm->lock);
495a374e892STony Krowiak 		return -ENXIO;
496a374e892STony Krowiak 	}
497a374e892STony Krowiak 
498a374e892STony Krowiak 	kvm_for_each_vcpu(i, vcpu, kvm) {
499a374e892STony Krowiak 		kvm_s390_vcpu_crypto_setup(vcpu);
500a374e892STony Krowiak 		exit_sie(vcpu);
501a374e892STony Krowiak 	}
502a374e892STony Krowiak 	mutex_unlock(&kvm->lock);
503a374e892STony Krowiak 	return 0;
504a374e892STony Krowiak }
505a374e892STony Krowiak 
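/*
 * Illustrative userspace sketch (not part of this file): toggling AES key
 * wrapping via the crypto attribute group handled above. Constants are from
 * <linux/kvm.h>; the helper name is an assumption. Without facility 76
 * (MSA extension 3) the handler above fails with -EINVAL.
 *
 *	int enable_aes_key_wrapping(int vm_fd)
 *	{
 *		struct kvm_device_attr attr = {
 *			.group = KVM_S390_VM_CRYPTO,
 *			.attr  = KVM_S390_VM_CRYPTO_ENABLE_AES_KW,
 *		};
 *
 *		return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *	}
 */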
50672f25020SJason J. Herne static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
50772f25020SJason J. Herne {
50872f25020SJason J. Herne 	u8 gtod_high;
50972f25020SJason J. Herne 
51072f25020SJason J. Herne 	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
51172f25020SJason J. Herne 					   sizeof(gtod_high)))
51272f25020SJason J. Herne 		return -EFAULT;
51372f25020SJason J. Herne 
51472f25020SJason J. Herne 	if (gtod_high != 0)
51572f25020SJason J. Herne 		return -EINVAL;
516c92ea7b9SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x\n", gtod_high);
51772f25020SJason J. Herne 
51872f25020SJason J. Herne 	return 0;
51972f25020SJason J. Herne }
52072f25020SJason J. Herne 
52172f25020SJason J. Herne static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
52272f25020SJason J. Herne {
52372f25020SJason J. Herne 	struct kvm_vcpu *cur_vcpu;
52472f25020SJason J. Herne 	unsigned int vcpu_idx;
52572f25020SJason J. Herne 	u64 host_tod, gtod;
52672f25020SJason J. Herne 	int r;
52772f25020SJason J. Herne 
52872f25020SJason J. Herne 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
52972f25020SJason J. Herne 		return -EFAULT;
53072f25020SJason J. Herne 
53172f25020SJason J. Herne 	r = store_tod_clock(&host_tod);
53272f25020SJason J. Herne 	if (r)
53372f25020SJason J. Herne 		return r;
53472f25020SJason J. Herne 
53572f25020SJason J. Herne 	mutex_lock(&kvm->lock);
536*fdf03650SFan Zhang 	preempt_disable();
53772f25020SJason J. Herne 	kvm->arch.epoch = gtod - host_tod;
53827406cd5SChristian Borntraeger 	kvm_s390_vcpu_block_all(kvm);
53927406cd5SChristian Borntraeger 	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
54072f25020SJason J. Herne 		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
54127406cd5SChristian Borntraeger 	kvm_s390_vcpu_unblock_all(kvm);
542*fdf03650SFan Zhang 	preempt_enable();
54372f25020SJason J. Herne 	mutex_unlock(&kvm->lock);
544c92ea7b9SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod);
54572f25020SJason J. Herne 	return 0;
54672f25020SJason J. Herne }
54772f25020SJason J. Herne 
54872f25020SJason J. Herne static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
54972f25020SJason J. Herne {
55072f25020SJason J. Herne 	int ret;
55172f25020SJason J. Herne 
55272f25020SJason J. Herne 	if (attr->flags)
55372f25020SJason J. Herne 		return -EINVAL;
55472f25020SJason J. Herne 
55572f25020SJason J. Herne 	switch (attr->attr) {
55672f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
55772f25020SJason J. Herne 		ret = kvm_s390_set_tod_high(kvm, attr);
55872f25020SJason J. Herne 		break;
55972f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
56072f25020SJason J. Herne 		ret = kvm_s390_set_tod_low(kvm, attr);
56172f25020SJason J. Herne 		break;
56272f25020SJason J. Herne 	default:
56372f25020SJason J. Herne 		ret = -ENXIO;
56472f25020SJason J. Herne 		break;
56572f25020SJason J. Herne 	}
56672f25020SJason J. Herne 	return ret;
56772f25020SJason J. Herne }
56872f25020SJason J. Herne 
56972f25020SJason J. Herne static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
57072f25020SJason J. Herne {
57172f25020SJason J. Herne 	u8 gtod_high = 0;
57272f25020SJason J. Herne 
57372f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod_high,
57472f25020SJason J. Herne 					 sizeof(gtod_high)))
57572f25020SJason J. Herne 		return -EFAULT;
576c92ea7b9SChristian Borntraeger 	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x\n", gtod_high);
57772f25020SJason J. Herne 
57872f25020SJason J. Herne 	return 0;
57972f25020SJason J. Herne }
58072f25020SJason J. Herne 
58172f25020SJason J. Herne static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
58272f25020SJason J. Herne {
58372f25020SJason J. Herne 	u64 host_tod, gtod;
58472f25020SJason J. Herne 	int r;
58572f25020SJason J. Herne 
58672f25020SJason J. Herne 	r = store_tod_clock(&host_tod);
58772f25020SJason J. Herne 	if (r)
58872f25020SJason J. Herne 		return r;
58972f25020SJason J. Herne 
590*fdf03650SFan Zhang 	preempt_disable();
59172f25020SJason J. Herne 	gtod = host_tod + kvm->arch.epoch;
592*fdf03650SFan Zhang 	preempt_enable();
59372f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
59472f25020SJason J. Herne 		return -EFAULT;
595c92ea7b9SChristian Borntraeger 	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx\n", gtod);
59672f25020SJason J. Herne 
59772f25020SJason J. Herne 	return 0;
59872f25020SJason J. Herne }
59972f25020SJason J. Herne 
60072f25020SJason J. Herne static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
60172f25020SJason J. Herne {
60272f25020SJason J. Herne 	int ret;
60372f25020SJason J. Herne 
60472f25020SJason J. Herne 	if (attr->flags)
60572f25020SJason J. Herne 		return -EINVAL;
60672f25020SJason J. Herne 
60772f25020SJason J. Herne 	switch (attr->attr) {
60872f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
60972f25020SJason J. Herne 		ret = kvm_s390_get_tod_high(kvm, attr);
61072f25020SJason J. Herne 		break;
61172f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
61272f25020SJason J. Herne 		ret = kvm_s390_get_tod_low(kvm, attr);
61372f25020SJason J. Herne 		break;
61472f25020SJason J. Herne 	default:
61572f25020SJason J. Herne 		ret = -ENXIO;
61672f25020SJason J. Herne 		break;
61772f25020SJason J. Herne 	}
61872f25020SJason J. Herne 	return ret;
61972f25020SJason J. Herne }
62072f25020SJason J. Herne 
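/*
 * Illustrative userspace sketch (not part of this file): reading the guest
 * TOD base through the attribute group handled above. Constants come from
 * <linux/kvm.h>; the helper name is an assumption.
 *
 *	int get_guest_tod_base(int vm_fd, __u64 *tod)
 *	{
 *		struct kvm_device_attr attr = {
 *			.group = KVM_S390_VM_TOD,
 *			.attr  = KVM_S390_VM_TOD_LOW,
 *			.addr  = (__u64)(unsigned long)tod,
 *		};
 *
 *		return ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
 *	}
 */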
621658b6edaSMichael Mueller static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
622658b6edaSMichael Mueller {
623658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_processor *proc;
624658b6edaSMichael Mueller 	int ret = 0;
625658b6edaSMichael Mueller 
626658b6edaSMichael Mueller 	mutex_lock(&kvm->lock);
627658b6edaSMichael Mueller 	if (atomic_read(&kvm->online_vcpus)) {
628658b6edaSMichael Mueller 		ret = -EBUSY;
629658b6edaSMichael Mueller 		goto out;
630658b6edaSMichael Mueller 	}
631658b6edaSMichael Mueller 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
632658b6edaSMichael Mueller 	if (!proc) {
633658b6edaSMichael Mueller 		ret = -ENOMEM;
634658b6edaSMichael Mueller 		goto out;
635658b6edaSMichael Mueller 	}
636658b6edaSMichael Mueller 	if (!copy_from_user(proc, (void __user *)attr->addr,
637658b6edaSMichael Mueller 			    sizeof(*proc))) {
638658b6edaSMichael Mueller 		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
639658b6edaSMichael Mueller 		       sizeof(struct cpuid));
640658b6edaSMichael Mueller 		kvm->arch.model.ibc = proc->ibc;
641981467c9SMichael Mueller 		memcpy(kvm->arch.model.fac->list, proc->fac_list,
642658b6edaSMichael Mueller 		       S390_ARCH_FAC_LIST_SIZE_BYTE);
643658b6edaSMichael Mueller 	} else
644658b6edaSMichael Mueller 		ret = -EFAULT;
645658b6edaSMichael Mueller 	kfree(proc);
646658b6edaSMichael Mueller out:
647658b6edaSMichael Mueller 	mutex_unlock(&kvm->lock);
648658b6edaSMichael Mueller 	return ret;
649658b6edaSMichael Mueller }
650658b6edaSMichael Mueller 
651658b6edaSMichael Mueller static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
652658b6edaSMichael Mueller {
653658b6edaSMichael Mueller 	int ret = -ENXIO;
654658b6edaSMichael Mueller 
655658b6edaSMichael Mueller 	switch (attr->attr) {
656658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
657658b6edaSMichael Mueller 		ret = kvm_s390_set_processor(kvm, attr);
658658b6edaSMichael Mueller 		break;
659658b6edaSMichael Mueller 	}
660658b6edaSMichael Mueller 	return ret;
661658b6edaSMichael Mueller }
662658b6edaSMichael Mueller 
663658b6edaSMichael Mueller static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
664658b6edaSMichael Mueller {
665658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_processor *proc;
666658b6edaSMichael Mueller 	int ret = 0;
667658b6edaSMichael Mueller 
668658b6edaSMichael Mueller 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
669658b6edaSMichael Mueller 	if (!proc) {
670658b6edaSMichael Mueller 		ret = -ENOMEM;
671658b6edaSMichael Mueller 		goto out;
672658b6edaSMichael Mueller 	}
673658b6edaSMichael Mueller 	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
674658b6edaSMichael Mueller 	proc->ibc = kvm->arch.model.ibc;
675981467c9SMichael Mueller 	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
676658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
677658b6edaSMichael Mueller 		ret = -EFAULT;
678658b6edaSMichael Mueller 	kfree(proc);
679658b6edaSMichael Mueller out:
680658b6edaSMichael Mueller 	return ret;
681658b6edaSMichael Mueller }
682658b6edaSMichael Mueller 
683658b6edaSMichael Mueller static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
684658b6edaSMichael Mueller {
685658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_machine *mach;
686658b6edaSMichael Mueller 	int ret = 0;
687658b6edaSMichael Mueller 
688658b6edaSMichael Mueller 	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
689658b6edaSMichael Mueller 	if (!mach) {
690658b6edaSMichael Mueller 		ret = -ENOMEM;
691658b6edaSMichael Mueller 		goto out;
692658b6edaSMichael Mueller 	}
693658b6edaSMichael Mueller 	get_cpu_id((struct cpuid *) &mach->cpuid);
69437c5f6c8SDavid Hildenbrand 	mach->ibc = sclp.ibc;
695981467c9SMichael Mueller 	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
696981467c9SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
697658b6edaSMichael Mueller 	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
69894422ee8SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
699658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
700658b6edaSMichael Mueller 		ret = -EFAULT;
701658b6edaSMichael Mueller 	kfree(mach);
702658b6edaSMichael Mueller out:
703658b6edaSMichael Mueller 	return ret;
704658b6edaSMichael Mueller }
705658b6edaSMichael Mueller 
706658b6edaSMichael Mueller static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
707658b6edaSMichael Mueller {
708658b6edaSMichael Mueller 	int ret = -ENXIO;
709658b6edaSMichael Mueller 
710658b6edaSMichael Mueller 	switch (attr->attr) {
711658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
712658b6edaSMichael Mueller 		ret = kvm_s390_get_processor(kvm, attr);
713658b6edaSMichael Mueller 		break;
714658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MACHINE:
715658b6edaSMichael Mueller 		ret = kvm_s390_get_machine(kvm, attr);
716658b6edaSMichael Mueller 		break;
717658b6edaSMichael Mueller 	}
718658b6edaSMichael Mueller 	return ret;
719658b6edaSMichael Mueller }
720658b6edaSMichael Mueller 
721f2061656SDominik Dingel static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
722f2061656SDominik Dingel {
723f2061656SDominik Dingel 	int ret;
724f2061656SDominik Dingel 
725f2061656SDominik Dingel 	switch (attr->group) {
7264f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
7278c0a7ce6SDominik Dingel 		ret = kvm_s390_set_mem_control(kvm, attr);
7284f718eabSDominik Dingel 		break;
72972f25020SJason J. Herne 	case KVM_S390_VM_TOD:
73072f25020SJason J. Herne 		ret = kvm_s390_set_tod(kvm, attr);
73172f25020SJason J. Herne 		break;
732658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
733658b6edaSMichael Mueller 		ret = kvm_s390_set_cpu_model(kvm, attr);
734658b6edaSMichael Mueller 		break;
735a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO:
736a374e892STony Krowiak 		ret = kvm_s390_vm_set_crypto(kvm, attr);
737a374e892STony Krowiak 		break;
738f2061656SDominik Dingel 	default:
739f2061656SDominik Dingel 		ret = -ENXIO;
740f2061656SDominik Dingel 		break;
741f2061656SDominik Dingel 	}
742f2061656SDominik Dingel 
743f2061656SDominik Dingel 	return ret;
744f2061656SDominik Dingel }
745f2061656SDominik Dingel 
746f2061656SDominik Dingel static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
747f2061656SDominik Dingel {
7488c0a7ce6SDominik Dingel 	int ret;
7498c0a7ce6SDominik Dingel 
7508c0a7ce6SDominik Dingel 	switch (attr->group) {
7518c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
7528c0a7ce6SDominik Dingel 		ret = kvm_s390_get_mem_control(kvm, attr);
7538c0a7ce6SDominik Dingel 		break;
75472f25020SJason J. Herne 	case KVM_S390_VM_TOD:
75572f25020SJason J. Herne 		ret = kvm_s390_get_tod(kvm, attr);
75672f25020SJason J. Herne 		break;
757658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
758658b6edaSMichael Mueller 		ret = kvm_s390_get_cpu_model(kvm, attr);
759658b6edaSMichael Mueller 		break;
7608c0a7ce6SDominik Dingel 	default:
7618c0a7ce6SDominik Dingel 		ret = -ENXIO;
7628c0a7ce6SDominik Dingel 		break;
7638c0a7ce6SDominik Dingel 	}
7648c0a7ce6SDominik Dingel 
7658c0a7ce6SDominik Dingel 	return ret;
766f2061656SDominik Dingel }
767f2061656SDominik Dingel 
768f2061656SDominik Dingel static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
769f2061656SDominik Dingel {
770f2061656SDominik Dingel 	int ret;
771f2061656SDominik Dingel 
772f2061656SDominik Dingel 	switch (attr->group) {
7734f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
7744f718eabSDominik Dingel 		switch (attr->attr) {
7754f718eabSDominik Dingel 		case KVM_S390_VM_MEM_ENABLE_CMMA:
7764f718eabSDominik Dingel 		case KVM_S390_VM_MEM_CLR_CMMA:
7778c0a7ce6SDominik Dingel 		case KVM_S390_VM_MEM_LIMIT_SIZE:
7784f718eabSDominik Dingel 			ret = 0;
7794f718eabSDominik Dingel 			break;
7804f718eabSDominik Dingel 		default:
7814f718eabSDominik Dingel 			ret = -ENXIO;
7824f718eabSDominik Dingel 			break;
7834f718eabSDominik Dingel 		}
7844f718eabSDominik Dingel 		break;
78572f25020SJason J. Herne 	case KVM_S390_VM_TOD:
78672f25020SJason J. Herne 		switch (attr->attr) {
78772f25020SJason J. Herne 		case KVM_S390_VM_TOD_LOW:
78872f25020SJason J. Herne 		case KVM_S390_VM_TOD_HIGH:
78972f25020SJason J. Herne 			ret = 0;
79072f25020SJason J. Herne 			break;
79172f25020SJason J. Herne 		default:
79272f25020SJason J. Herne 			ret = -ENXIO;
79372f25020SJason J. Herne 			break;
79472f25020SJason J. Herne 		}
79572f25020SJason J. Herne 		break;
796658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
797658b6edaSMichael Mueller 		switch (attr->attr) {
798658b6edaSMichael Mueller 		case KVM_S390_VM_CPU_PROCESSOR:
799658b6edaSMichael Mueller 		case KVM_S390_VM_CPU_MACHINE:
800658b6edaSMichael Mueller 			ret = 0;
801658b6edaSMichael Mueller 			break;
802658b6edaSMichael Mueller 		default:
803658b6edaSMichael Mueller 			ret = -ENXIO;
804658b6edaSMichael Mueller 			break;
805658b6edaSMichael Mueller 		}
806658b6edaSMichael Mueller 		break;
807a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO:
808a374e892STony Krowiak 		switch (attr->attr) {
809a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
810a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
811a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
812a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
813a374e892STony Krowiak 			ret = 0;
814a374e892STony Krowiak 			break;
815a374e892STony Krowiak 		default:
816a374e892STony Krowiak 			ret = -ENXIO;
817a374e892STony Krowiak 			break;
818a374e892STony Krowiak 		}
819a374e892STony Krowiak 		break;
820f2061656SDominik Dingel 	default:
821f2061656SDominik Dingel 		ret = -ENXIO;
822f2061656SDominik Dingel 		break;
823f2061656SDominik Dingel 	}
824f2061656SDominik Dingel 
825f2061656SDominik Dingel 	return ret;
826f2061656SDominik Dingel }
827f2061656SDominik Dingel 
82830ee2a98SJason J. Herne static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
82930ee2a98SJason J. Herne {
83030ee2a98SJason J. Herne 	uint8_t *keys;
83130ee2a98SJason J. Herne 	uint64_t hva;
83230ee2a98SJason J. Herne 	unsigned long curkey;
83330ee2a98SJason J. Herne 	int i, r = 0;
83430ee2a98SJason J. Herne 
83530ee2a98SJason J. Herne 	if (args->flags != 0)
83630ee2a98SJason J. Herne 		return -EINVAL;
83730ee2a98SJason J. Herne 
83830ee2a98SJason J. Herne 	/* Is this guest using storage keys? */
83930ee2a98SJason J. Herne 	if (!mm_use_skey(current->mm))
84030ee2a98SJason J. Herne 		return KVM_S390_GET_SKEYS_NONE;
84130ee2a98SJason J. Herne 
84230ee2a98SJason J. Herne 	/* Enforce sane limit on memory allocation */
84330ee2a98SJason J. Herne 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
84430ee2a98SJason J. Herne 		return -EINVAL;
84530ee2a98SJason J. Herne 
84630ee2a98SJason J. Herne 	keys = kmalloc_array(args->count, sizeof(uint8_t),
84730ee2a98SJason J. Herne 			     GFP_KERNEL | __GFP_NOWARN);
84830ee2a98SJason J. Herne 	if (!keys)
84930ee2a98SJason J. Herne 		keys = vmalloc(sizeof(uint8_t) * args->count);
85030ee2a98SJason J. Herne 	if (!keys)
85130ee2a98SJason J. Herne 		return -ENOMEM;
85230ee2a98SJason J. Herne 
85330ee2a98SJason J. Herne 	for (i = 0; i < args->count; i++) {
85430ee2a98SJason J. Herne 		hva = gfn_to_hva(kvm, args->start_gfn + i);
85530ee2a98SJason J. Herne 		if (kvm_is_error_hva(hva)) {
85630ee2a98SJason J. Herne 			r = -EFAULT;
85730ee2a98SJason J. Herne 			goto out;
85830ee2a98SJason J. Herne 		}
85930ee2a98SJason J. Herne 
86030ee2a98SJason J. Herne 		curkey = get_guest_storage_key(current->mm, hva);
86130ee2a98SJason J. Herne 		if (IS_ERR_VALUE(curkey)) {
86230ee2a98SJason J. Herne 			r = curkey;
86330ee2a98SJason J. Herne 			goto out;
86430ee2a98SJason J. Herne 		}
86530ee2a98SJason J. Herne 		keys[i] = curkey;
86630ee2a98SJason J. Herne 	}
86730ee2a98SJason J. Herne 
86830ee2a98SJason J. Herne 	r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
86930ee2a98SJason J. Herne 			 sizeof(uint8_t) * args->count);
87030ee2a98SJason J. Herne 	if (r)
87130ee2a98SJason J. Herne 		r = -EFAULT;
87230ee2a98SJason J. Herne out:
87330ee2a98SJason J. Herne 	kvfree(keys);
87430ee2a98SJason J. Herne 	return r;
87530ee2a98SJason J. Herne }
87630ee2a98SJason J. Herne 
87730ee2a98SJason J. Herne static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
87830ee2a98SJason J. Herne {
87930ee2a98SJason J. Herne 	uint8_t *keys;
88030ee2a98SJason J. Herne 	uint64_t hva;
88130ee2a98SJason J. Herne 	int i, r = 0;
88230ee2a98SJason J. Herne 
88330ee2a98SJason J. Herne 	if (args->flags != 0)
88430ee2a98SJason J. Herne 		return -EINVAL;
88530ee2a98SJason J. Herne 
88630ee2a98SJason J. Herne 	/* Enforce sane limit on memory allocation */
88730ee2a98SJason J. Herne 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
88830ee2a98SJason J. Herne 		return -EINVAL;
88930ee2a98SJason J. Herne 
89030ee2a98SJason J. Herne 	keys = kmalloc_array(args->count, sizeof(uint8_t),
89130ee2a98SJason J. Herne 			     GFP_KERNEL | __GFP_NOWARN);
89230ee2a98SJason J. Herne 	if (!keys)
89330ee2a98SJason J. Herne 		keys = vmalloc(sizeof(uint8_t) * args->count);
89430ee2a98SJason J. Herne 	if (!keys)
89530ee2a98SJason J. Herne 		return -ENOMEM;
89630ee2a98SJason J. Herne 
89730ee2a98SJason J. Herne 	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
89830ee2a98SJason J. Herne 			   sizeof(uint8_t) * args->count);
89930ee2a98SJason J. Herne 	if (r) {
90030ee2a98SJason J. Herne 		r = -EFAULT;
90130ee2a98SJason J. Herne 		goto out;
90230ee2a98SJason J. Herne 	}
90330ee2a98SJason J. Herne 
90430ee2a98SJason J. Herne 	/* Enable storage key handling for the guest */
90514d4a425SDominik Dingel 	r = s390_enable_skey();
90614d4a425SDominik Dingel 	if (r)
90714d4a425SDominik Dingel 		goto out;
90830ee2a98SJason J. Herne 
90930ee2a98SJason J. Herne 	for (i = 0; i < args->count; i++) {
91030ee2a98SJason J. Herne 		hva = gfn_to_hva(kvm, args->start_gfn + i);
91130ee2a98SJason J. Herne 		if (kvm_is_error_hva(hva)) {
91230ee2a98SJason J. Herne 			r = -EFAULT;
91330ee2a98SJason J. Herne 			goto out;
91430ee2a98SJason J. Herne 		}
91530ee2a98SJason J. Herne 
91630ee2a98SJason J. Herne 		/* Lowest order bit is reserved */
91730ee2a98SJason J. Herne 		if (keys[i] & 0x01) {
91830ee2a98SJason J. Herne 			r = -EINVAL;
91930ee2a98SJason J. Herne 			goto out;
92030ee2a98SJason J. Herne 		}
92130ee2a98SJason J. Herne 
92230ee2a98SJason J. Herne 		r = set_guest_storage_key(current->mm, hva,
92330ee2a98SJason J. Herne 					  (unsigned long)keys[i], 0);
92430ee2a98SJason J. Herne 		if (r)
92530ee2a98SJason J. Herne 			goto out;
92630ee2a98SJason J. Herne 	}
92730ee2a98SJason J. Herne out:
92830ee2a98SJason J. Herne 	kvfree(keys);
92930ee2a98SJason J. Herne 	return r;
93030ee2a98SJason J. Herne }
93130ee2a98SJason J. Herne 
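/*
 * Illustrative userspace sketch (not part of this file): fetching storage
 * keys with the KVM_S390_GET_SKEYS ioctl implemented above. The struct is
 * part of the KVM ABI in <linux/kvm.h>; the helper name is an assumption.
 * A return value of KVM_S390_GET_SKEYS_NONE means the guest does not use
 * storage keys.
 *
 *	int get_skeys(int vm_fd, __u64 start_gfn, __u64 count, __u8 *buf)
 *	{
 *		struct kvm_s390_skeys args = {
 *			.start_gfn = start_gfn,
 *			.count = count,
 *			.skeydata_addr = (__u64)(unsigned long)buf,
 *		};
 *
 *		return ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
 *	}
 */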
932b0c632dbSHeiko Carstens long kvm_arch_vm_ioctl(struct file *filp,
933b0c632dbSHeiko Carstens 		       unsigned int ioctl, unsigned long arg)
934b0c632dbSHeiko Carstens {
935b0c632dbSHeiko Carstens 	struct kvm *kvm = filp->private_data;
936b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
937f2061656SDominik Dingel 	struct kvm_device_attr attr;
938b0c632dbSHeiko Carstens 	int r;
939b0c632dbSHeiko Carstens 
940b0c632dbSHeiko Carstens 	switch (ioctl) {
941ba5c1e9bSCarsten Otte 	case KVM_S390_INTERRUPT: {
942ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
943ba5c1e9bSCarsten Otte 
944ba5c1e9bSCarsten Otte 		r = -EFAULT;
945ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
946ba5c1e9bSCarsten Otte 			break;
947ba5c1e9bSCarsten Otte 		r = kvm_s390_inject_vm(kvm, &s390int);
948ba5c1e9bSCarsten Otte 		break;
949ba5c1e9bSCarsten Otte 	}
950d938dc55SCornelia Huck 	case KVM_ENABLE_CAP: {
951d938dc55SCornelia Huck 		struct kvm_enable_cap cap;
952d938dc55SCornelia Huck 		r = -EFAULT;
953d938dc55SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
954d938dc55SCornelia Huck 			break;
955d938dc55SCornelia Huck 		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
956d938dc55SCornelia Huck 		break;
957d938dc55SCornelia Huck 	}
95884223598SCornelia Huck 	case KVM_CREATE_IRQCHIP: {
95984223598SCornelia Huck 		struct kvm_irq_routing_entry routing;
96084223598SCornelia Huck 
96184223598SCornelia Huck 		r = -EINVAL;
96284223598SCornelia Huck 		if (kvm->arch.use_irqchip) {
96384223598SCornelia Huck 			/* Set up dummy routing. */
96484223598SCornelia Huck 			memset(&routing, 0, sizeof(routing));
96584223598SCornelia Huck 			kvm_set_irq_routing(kvm, &routing, 0, 0);
96684223598SCornelia Huck 			r = 0;
96784223598SCornelia Huck 		}
96884223598SCornelia Huck 		break;
96984223598SCornelia Huck 	}
970f2061656SDominik Dingel 	case KVM_SET_DEVICE_ATTR: {
971f2061656SDominik Dingel 		r = -EFAULT;
972f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
973f2061656SDominik Dingel 			break;
974f2061656SDominik Dingel 		r = kvm_s390_vm_set_attr(kvm, &attr);
975f2061656SDominik Dingel 		break;
976f2061656SDominik Dingel 	}
977f2061656SDominik Dingel 	case KVM_GET_DEVICE_ATTR: {
978f2061656SDominik Dingel 		r = -EFAULT;
979f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
980f2061656SDominik Dingel 			break;
981f2061656SDominik Dingel 		r = kvm_s390_vm_get_attr(kvm, &attr);
982f2061656SDominik Dingel 		break;
983f2061656SDominik Dingel 	}
984f2061656SDominik Dingel 	case KVM_HAS_DEVICE_ATTR: {
985f2061656SDominik Dingel 		r = -EFAULT;
986f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
987f2061656SDominik Dingel 			break;
988f2061656SDominik Dingel 		r = kvm_s390_vm_has_attr(kvm, &attr);
989f2061656SDominik Dingel 		break;
990f2061656SDominik Dingel 	}
99130ee2a98SJason J. Herne 	case KVM_S390_GET_SKEYS: {
99230ee2a98SJason J. Herne 		struct kvm_s390_skeys args;
99330ee2a98SJason J. Herne 
99430ee2a98SJason J. Herne 		r = -EFAULT;
99530ee2a98SJason J. Herne 		if (copy_from_user(&args, argp,
99630ee2a98SJason J. Herne 				   sizeof(struct kvm_s390_skeys)))
99730ee2a98SJason J. Herne 			break;
99830ee2a98SJason J. Herne 		r = kvm_s390_get_skeys(kvm, &args);
99930ee2a98SJason J. Herne 		break;
100030ee2a98SJason J. Herne 	}
100130ee2a98SJason J. Herne 	case KVM_S390_SET_SKEYS: {
100230ee2a98SJason J. Herne 		struct kvm_s390_skeys args;
100330ee2a98SJason J. Herne 
100430ee2a98SJason J. Herne 		r = -EFAULT;
100530ee2a98SJason J. Herne 		if (copy_from_user(&args, argp,
100630ee2a98SJason J. Herne 				   sizeof(struct kvm_s390_skeys)))
100730ee2a98SJason J. Herne 			break;
100830ee2a98SJason J. Herne 		r = kvm_s390_set_skeys(kvm, &args);
100930ee2a98SJason J. Herne 		break;
101030ee2a98SJason J. Herne 	}
1011b0c632dbSHeiko Carstens 	default:
1012367e1319SAvi Kivity 		r = -ENOTTY;
1013b0c632dbSHeiko Carstens 	}
1014b0c632dbSHeiko Carstens 
1015b0c632dbSHeiko Carstens 	return r;
1016b0c632dbSHeiko Carstens }
1017b0c632dbSHeiko Carstens 
101845c9b47cSTony Krowiak static int kvm_s390_query_ap_config(u8 *config)
101945c9b47cSTony Krowiak {
102045c9b47cSTony Krowiak 	u32 fcn_code = 0x04000000UL;
102186044c8cSChristian Borntraeger 	u32 cc = 0;
102245c9b47cSTony Krowiak 
102386044c8cSChristian Borntraeger 	memset(config, 0, 128);
102445c9b47cSTony Krowiak 	asm volatile(
102545c9b47cSTony Krowiak 		"lgr 0,%1\n"
102645c9b47cSTony Krowiak 		"lgr 2,%2\n"
102745c9b47cSTony Krowiak 		".long 0xb2af0000\n"		/* PQAP(QCI) */
102886044c8cSChristian Borntraeger 		"0: ipm %0\n"
102945c9b47cSTony Krowiak 		"srl %0,28\n"
103086044c8cSChristian Borntraeger 		"1:\n"
103186044c8cSChristian Borntraeger 		EX_TABLE(0b, 1b)
103286044c8cSChristian Borntraeger 		: "+r" (cc)
103345c9b47cSTony Krowiak 		: "r" (fcn_code), "r" (config)
103445c9b47cSTony Krowiak 		: "cc", "0", "2", "memory"
103545c9b47cSTony Krowiak 	);
103645c9b47cSTony Krowiak 
103745c9b47cSTony Krowiak 	return cc;
103845c9b47cSTony Krowiak }
103945c9b47cSTony Krowiak 
104045c9b47cSTony Krowiak static int kvm_s390_apxa_installed(void)
104145c9b47cSTony Krowiak {
104245c9b47cSTony Krowiak 	u8 config[128];
104345c9b47cSTony Krowiak 	int cc;
104445c9b47cSTony Krowiak 
104545c9b47cSTony Krowiak 	if (test_facility(2) && test_facility(12)) {
104645c9b47cSTony Krowiak 		cc = kvm_s390_query_ap_config(config);
104745c9b47cSTony Krowiak 
104845c9b47cSTony Krowiak 		if (cc)
104945c9b47cSTony Krowiak 			pr_err("PQAP(QCI) failed with cc=%d", cc);
105045c9b47cSTony Krowiak 		else
105145c9b47cSTony Krowiak 			return config[0] & 0x40;
105245c9b47cSTony Krowiak 	}
105345c9b47cSTony Krowiak 
105445c9b47cSTony Krowiak 	return 0;
105545c9b47cSTony Krowiak }
105645c9b47cSTony Krowiak 
105745c9b47cSTony Krowiak static void kvm_s390_set_crycb_format(struct kvm *kvm)
105845c9b47cSTony Krowiak {
105945c9b47cSTony Krowiak 	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
106045c9b47cSTony Krowiak 
106145c9b47cSTony Krowiak 	if (kvm_s390_apxa_installed())
106245c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
106345c9b47cSTony Krowiak 	else
106445c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
106545c9b47cSTony Krowiak }
106645c9b47cSTony Krowiak 
10679d8d5786SMichael Mueller static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
10689d8d5786SMichael Mueller {
10699d8d5786SMichael Mueller 	get_cpu_id(cpu_id);
10709d8d5786SMichael Mueller 	cpu_id->version = 0xff;
10719d8d5786SMichael Mueller }
10729d8d5786SMichael Mueller 
10735102ee87STony Krowiak static int kvm_s390_crypto_init(struct kvm *kvm)
10745102ee87STony Krowiak {
10759d8d5786SMichael Mueller 	if (!test_kvm_facility(kvm, 76))
10765102ee87STony Krowiak 		return 0;
10775102ee87STony Krowiak 
10785102ee87STony Krowiak 	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
10795102ee87STony Krowiak 					 GFP_KERNEL | GFP_DMA);
10805102ee87STony Krowiak 	if (!kvm->arch.crypto.crycb)
10815102ee87STony Krowiak 		return -ENOMEM;
10825102ee87STony Krowiak 
108345c9b47cSTony Krowiak 	kvm_s390_set_crycb_format(kvm);
10845102ee87STony Krowiak 
1085ed6f76b4STony Krowiak 	/* Enable AES/DEA protected key functions by default */
1086ed6f76b4STony Krowiak 	kvm->arch.crypto.aes_kw = 1;
1087ed6f76b4STony Krowiak 	kvm->arch.crypto.dea_kw = 1;
1088ed6f76b4STony Krowiak 	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1089ed6f76b4STony Krowiak 			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1090ed6f76b4STony Krowiak 	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1091ed6f76b4STony Krowiak 			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
1092a374e892STony Krowiak 
10935102ee87STony Krowiak 	return 0;
10945102ee87STony Krowiak }
10955102ee87STony Krowiak 
1096e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
1097b0c632dbSHeiko Carstens {
10989d8d5786SMichael Mueller 	int i, rc;
1099b0c632dbSHeiko Carstens 	char debug_name[16];
1100f6c137ffSChristian Borntraeger 	static unsigned long sca_offset;
1101b0c632dbSHeiko Carstens 
1102e08b9637SCarsten Otte 	rc = -EINVAL;
1103e08b9637SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
1104e08b9637SCarsten Otte 	if (type & ~KVM_VM_S390_UCONTROL)
1105e08b9637SCarsten Otte 		goto out_err;
1106e08b9637SCarsten Otte 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
1107e08b9637SCarsten Otte 		goto out_err;
1108e08b9637SCarsten Otte #else
1109e08b9637SCarsten Otte 	if (type)
1110e08b9637SCarsten Otte 		goto out_err;
1111e08b9637SCarsten Otte #endif
1112e08b9637SCarsten Otte 
1113b0c632dbSHeiko Carstens 	rc = s390_enable_sie();
1114b0c632dbSHeiko Carstens 	if (rc)
1115d89f5effSJan Kiszka 		goto out_err;
1116b0c632dbSHeiko Carstens 
1117b290411aSCarsten Otte 	rc = -ENOMEM;
1118b290411aSCarsten Otte 
1119b0c632dbSHeiko Carstens 	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
1120b0c632dbSHeiko Carstens 	if (!kvm->arch.sca)
1121d89f5effSJan Kiszka 		goto out_err;
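	/*
	 * Stagger each VM's SCA at a different 16-byte aligned offset within
	 * its page, presumably so that the SCAs of different VMs do not all
	 * share the same cache lines.
	 */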
1122f6c137ffSChristian Borntraeger 	spin_lock(&kvm_lock);
1123f6c137ffSChristian Borntraeger 	sca_offset = (sca_offset + 16) & 0x7f0;
1124f6c137ffSChristian Borntraeger 	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
1125f6c137ffSChristian Borntraeger 	spin_unlock(&kvm_lock);
1126b0c632dbSHeiko Carstens 
1127b0c632dbSHeiko Carstens 	sprintf(debug_name, "kvm-%u", current->pid);
1128b0c632dbSHeiko Carstens 
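	/*
	 * s390 debug feature: assuming debug_register(name, pages_per_area,
	 * nr_areas, buf_size), this gives one area of 32 pages with
	 * 7 * sizeof(long) bytes per record.
	 */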
11291cb9cf72SChristian Borntraeger 	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
1130b0c632dbSHeiko Carstens 	if (!kvm->arch.dbf)
113140f5b735SDominik Dingel 		goto out_err;
1132b0c632dbSHeiko Carstens 
11339d8d5786SMichael Mueller 	/*
11349d8d5786SMichael Mueller 	 * The architectural maximum number of facilities is 16 kbit, which
11359d8d5786SMichael Mueller 	 * takes 2 kbyte of memory to store. Thus we need a full page to hold
1136981467c9SMichael Mueller 	 * the guest facility list (arch.model.fac->list) and the facility
1137981467c9SMichael Mueller 	 * mask (arch.model.fac->mask). The address handed to the hardware
11389d8d5786SMichael Mueller 	 * has to fit into 31 bits and be word aligned.
11399d8d5786SMichael Mueller 	 */
11409d8d5786SMichael Mueller 	kvm->arch.model.fac =
1141981467c9SMichael Mueller 		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
11429d8d5786SMichael Mueller 	if (!kvm->arch.model.fac)
114340f5b735SDominik Dingel 		goto out_err;
11449d8d5786SMichael Mueller 
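	/*
	 * fac->mask is the upper bound of facilities KVM will ever offer to
	 * this guest (host facilities limited by kvm_s390_fac_list_mask),
	 * while fac->list holds the facilities currently presented to the
	 * guest and may later be reduced via the CPU model interface
	 * (interpretation assumed from the code below).
	 */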
1145fb5bf93fSMichael Mueller 	/* Populate the facility mask initially. */
1146981467c9SMichael Mueller 	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
114794422ee8SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
11489d8d5786SMichael Mueller 	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
11499d8d5786SMichael Mueller 		if (i < kvm_s390_fac_list_mask_size())
1150981467c9SMichael Mueller 			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
11519d8d5786SMichael Mueller 		else
1152981467c9SMichael Mueller 			kvm->arch.model.fac->mask[i] = 0UL;
11539d8d5786SMichael Mueller 	}
11549d8d5786SMichael Mueller 
1155981467c9SMichael Mueller 	/* Populate the facility list initially. */
1156981467c9SMichael Mueller 	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
1157981467c9SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
1158981467c9SMichael Mueller 
11599d8d5786SMichael Mueller 	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
116037c5f6c8SDavid Hildenbrand 	kvm->arch.model.ibc = sclp.ibc & 0x0fff;
11619d8d5786SMichael Mueller 
11625102ee87STony Krowiak 	if (kvm_s390_crypto_init(kvm) < 0)
116340f5b735SDominik Dingel 		goto out_err;
11645102ee87STony Krowiak 
1165ba5c1e9bSCarsten Otte 	spin_lock_init(&kvm->arch.float_int.lock);
11666d3da241SJens Freimann 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
11676d3da241SJens Freimann 		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
11688a242234SHeiko Carstens 	init_waitqueue_head(&kvm->arch.ipte_wq);
1169a6b7e459SThomas Huth 	mutex_init(&kvm->arch.ipte_mutex);
1170ba5c1e9bSCarsten Otte 
1171b0c632dbSHeiko Carstens 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
117278f26131SChristian Borntraeger 	VM_EVENT(kvm, 3, "vm created with type %lu", type);
1173b0c632dbSHeiko Carstens 
1174e08b9637SCarsten Otte 	if (type & KVM_VM_S390_UCONTROL) {
1175e08b9637SCarsten Otte 		kvm->arch.gmap = NULL;
1176e08b9637SCarsten Otte 	} else {
11770349985aSChristian Borntraeger 		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
1178598841caSCarsten Otte 		if (!kvm->arch.gmap)
117940f5b735SDominik Dingel 			goto out_err;
11802c70fe44SChristian Borntraeger 		kvm->arch.gmap->private = kvm;
118124eb3a82SDominik Dingel 		kvm->arch.gmap->pfault_enabled = 0;
1182e08b9637SCarsten Otte 	}
1183fa6b7fe9SCornelia Huck 
1184fa6b7fe9SCornelia Huck 	kvm->arch.css_support = 0;
118584223598SCornelia Huck 	kvm->arch.use_irqchip = 0;
118672f25020SJason J. Herne 	kvm->arch.epoch = 0;
1187fa6b7fe9SCornelia Huck 
11888ad35755SDavid Hildenbrand 	spin_lock_init(&kvm->arch.start_stop_lock);
118978f26131SChristian Borntraeger 	KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid);
11908ad35755SDavid Hildenbrand 
1191d89f5effSJan Kiszka 	return 0;
1192d89f5effSJan Kiszka out_err:
119340f5b735SDominik Dingel 	kfree(kvm->arch.crypto.crycb);
119440f5b735SDominik Dingel 	free_page((unsigned long)kvm->arch.model.fac);
119540f5b735SDominik Dingel 	debug_unregister(kvm->arch.dbf);
119640f5b735SDominik Dingel 	free_page((unsigned long)(kvm->arch.sca));
119778f26131SChristian Borntraeger 	KVM_EVENT(3, "creation of vm failed: %d", rc);
1198d89f5effSJan Kiszka 	return rc;
1199b0c632dbSHeiko Carstens }
1200b0c632dbSHeiko Carstens 
1201d329c035SChristian Borntraeger void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1202d329c035SChristian Borntraeger {
1203d329c035SChristian Borntraeger 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
1204ade38c31SCornelia Huck 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
120567335e63SChristian Borntraeger 	kvm_s390_clear_local_irqs(vcpu);
12063c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
120758f9460bSCarsten Otte 	if (!kvm_is_ucontrol(vcpu->kvm)) {
120858f9460bSCarsten Otte 		clear_bit(63 - vcpu->vcpu_id,
120958f9460bSCarsten Otte 			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
1210abf4a71eSCarsten Otte 		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
1211abf4a71eSCarsten Otte 		    (__u64) vcpu->arch.sie_block)
1212abf4a71eSCarsten Otte 			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
121358f9460bSCarsten Otte 	}
1214abf4a71eSCarsten Otte 	smp_mb();
121527e0393fSCarsten Otte 
121627e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm))
121727e0393fSCarsten Otte 		gmap_free(vcpu->arch.gmap);
121827e0393fSCarsten Otte 
1219e6db1d61SDominik Dingel 	if (vcpu->kvm->arch.use_cmma)
1220b31605c1SDominik Dingel 		kvm_s390_vcpu_unsetup_cmma(vcpu);
1221d329c035SChristian Borntraeger 	free_page((unsigned long)(vcpu->arch.sie_block));
1222b31288faSKonstantin Weitz 
12236692cef3SChristian Borntraeger 	kvm_vcpu_uninit(vcpu);
1224b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
1225d329c035SChristian Borntraeger }
1226d329c035SChristian Borntraeger 
1227d329c035SChristian Borntraeger static void kvm_free_vcpus(struct kvm *kvm)
1228d329c035SChristian Borntraeger {
1229d329c035SChristian Borntraeger 	unsigned int i;
1230988a2caeSGleb Natapov 	struct kvm_vcpu *vcpu;
1231d329c035SChristian Borntraeger 
1232988a2caeSGleb Natapov 	kvm_for_each_vcpu(i, vcpu, kvm)
1233988a2caeSGleb Natapov 		kvm_arch_vcpu_destroy(vcpu);
1234988a2caeSGleb Natapov 
1235988a2caeSGleb Natapov 	mutex_lock(&kvm->lock);
1236988a2caeSGleb Natapov 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
1237d329c035SChristian Borntraeger 		kvm->vcpus[i] = NULL;
1238988a2caeSGleb Natapov 
1239988a2caeSGleb Natapov 	atomic_set(&kvm->online_vcpus, 0);
1240988a2caeSGleb Natapov 	mutex_unlock(&kvm->lock);
1241d329c035SChristian Borntraeger }
1242d329c035SChristian Borntraeger 
1243b0c632dbSHeiko Carstens void kvm_arch_destroy_vm(struct kvm *kvm)
1244b0c632dbSHeiko Carstens {
1245d329c035SChristian Borntraeger 	kvm_free_vcpus(kvm);
12469d8d5786SMichael Mueller 	free_page((unsigned long)kvm->arch.model.fac);
1247b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
1248d329c035SChristian Borntraeger 	debug_unregister(kvm->arch.dbf);
12495102ee87STony Krowiak 	kfree(kvm->arch.crypto.crycb);
125027e0393fSCarsten Otte 	if (!kvm_is_ucontrol(kvm))
1251598841caSCarsten Otte 		gmap_free(kvm->arch.gmap);
1252841b91c5SCornelia Huck 	kvm_s390_destroy_adapters(kvm);
125367335e63SChristian Borntraeger 	kvm_s390_clear_float_irqs(kvm);
125478f26131SChristian Borntraeger 	KVM_EVENT(3, "vm 0x%p destroyed", kvm);
1255b0c632dbSHeiko Carstens }
1256b0c632dbSHeiko Carstens 
1257b0c632dbSHeiko Carstens /* Section: vcpu related */
1258dafd032aSDominik Dingel static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1259b0c632dbSHeiko Carstens {
1260c6c956b8SMartin Schwidefsky 	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
126127e0393fSCarsten Otte 	if (!vcpu->arch.gmap)
126227e0393fSCarsten Otte 		return -ENOMEM;
12632c70fe44SChristian Borntraeger 	vcpu->arch.gmap->private = vcpu->kvm;
1264dafd032aSDominik Dingel 
126527e0393fSCarsten Otte 	return 0;
126627e0393fSCarsten Otte }
126727e0393fSCarsten Otte 
1268dafd032aSDominik Dingel int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1269dafd032aSDominik Dingel {
1270dafd032aSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1271dafd032aSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
127259674c1aSChristian Borntraeger 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
127359674c1aSChristian Borntraeger 				    KVM_SYNC_GPRS |
12749eed0735SChristian Borntraeger 				    KVM_SYNC_ACRS |
1275b028ee3eSDavid Hildenbrand 				    KVM_SYNC_CRS |
1276b028ee3eSDavid Hildenbrand 				    KVM_SYNC_ARCH0 |
1277b028ee3eSDavid Hildenbrand 				    KVM_SYNC_PFAULT;
127868c55750SEric Farman 	if (test_kvm_facility(vcpu->kvm, 129))
127968c55750SEric Farman 		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
1280dafd032aSDominik Dingel 
1281dafd032aSDominik Dingel 	if (kvm_is_ucontrol(vcpu->kvm))
1282dafd032aSDominik Dingel 		return __kvm_ucontrol_vcpu_init(vcpu);
1283dafd032aSDominik Dingel 
1284b0c632dbSHeiko Carstens 	return 0;
1285b0c632dbSHeiko Carstens }
1286b0c632dbSHeiko Carstens 
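/*
 * On machines with the vector facility (129) the floating point registers
 * are embedded in vector registers 0-15, so saving/restoring the vector
 * registers implicitly covers the FP registers; only the FPC is handled
 * separately.
 */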
1287b0c632dbSHeiko Carstens void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1288b0c632dbSHeiko Carstens {
12894725c860SMartin Schwidefsky 	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
129018280d8bSMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 129))
129168c55750SEric Farman 		save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
129268c55750SEric Farman 	else
12934725c860SMartin Schwidefsky 		save_fp_regs(vcpu->arch.host_fpregs.fprs);
1294b0c632dbSHeiko Carstens 	save_access_regs(vcpu->arch.host_acrs);
129518280d8bSMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 129)) {
129668c55750SEric Farman 		restore_fp_ctl(&vcpu->run->s.regs.fpc);
129768c55750SEric Farman 		restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
129868c55750SEric Farman 	} else {
12994725c860SMartin Schwidefsky 		restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
13004725c860SMartin Schwidefsky 		restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
130168c55750SEric Farman 	}
130259674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
1303480e5926SChristian Borntraeger 	gmap_enable(vcpu->arch.gmap);
13049e6dabefSCornelia Huck 	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
1305b0c632dbSHeiko Carstens }
1306b0c632dbSHeiko Carstens 
1307b0c632dbSHeiko Carstens void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1308b0c632dbSHeiko Carstens {
13099e6dabefSCornelia Huck 	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
1310480e5926SChristian Borntraeger 	gmap_disable(vcpu->arch.gmap);
131118280d8bSMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 129)) {
131268c55750SEric Farman 		save_fp_ctl(&vcpu->run->s.regs.fpc);
131368c55750SEric Farman 		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
131468c55750SEric Farman 	} else {
13154725c860SMartin Schwidefsky 		save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
13164725c860SMartin Schwidefsky 		save_fp_regs(vcpu->arch.guest_fpregs.fprs);
131768c55750SEric Farman 	}
131859674c1aSChristian Borntraeger 	save_access_regs(vcpu->run->s.regs.acrs);
13194725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
132018280d8bSMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 129))
132168c55750SEric Farman 		restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
132268c55750SEric Farman 	else
13234725c860SMartin Schwidefsky 		restore_fp_regs(vcpu->arch.host_fpregs.fprs);
1324b0c632dbSHeiko Carstens 	restore_access_regs(vcpu->arch.host_acrs);
1325b0c632dbSHeiko Carstens }
1326b0c632dbSHeiko Carstens 
1327b0c632dbSHeiko Carstens static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
1328b0c632dbSHeiko Carstens {
1329b0c632dbSHeiko Carstens 	/* this equals initial cpu reset in POP, but we don't switch to ESA */
1330b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.mask = 0UL;
1331b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.addr = 0UL;
13328d26cf7bSChristian Borntraeger 	kvm_s390_set_prefix(vcpu, 0);
1333b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->cputm     = 0UL;
1334b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->ckc       = 0UL;
1335b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->todpr     = 0;
1336b0c632dbSHeiko Carstens 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
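	/* CR0/CR14 are assumed to match the architected initial CPU reset values */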
1337b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
1338b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
1339b0c632dbSHeiko Carstens 	vcpu->arch.guest_fpregs.fpc = 0;
1340b0c632dbSHeiko Carstens 	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
1341b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gbea = 1;
1342672550fbSChristian Borntraeger 	vcpu->arch.sie_block->pp = 0;
13433c038e6bSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
13443c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
13456352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
13466852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
13472ed10cc1SJens Freimann 	kvm_s390_clear_local_irqs(vcpu);
1348b0c632dbSHeiko Carstens }
1349b0c632dbSHeiko Carstens 
135031928aa5SDominik Dingel void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
135142897d86SMarcelo Tosatti {
135272f25020SJason J. Herne 	mutex_lock(&vcpu->kvm->lock);
1353*fdf03650SFan Zhang 	preempt_disable();
135472f25020SJason J. Herne 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
1355*fdf03650SFan Zhang 	preempt_enable();
135672f25020SJason J. Herne 	mutex_unlock(&vcpu->kvm->lock);
1357dafd032aSDominik Dingel 	if (!kvm_is_ucontrol(vcpu->kvm))
1358dafd032aSDominik Dingel 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
135942897d86SMarcelo Tosatti }
136042897d86SMarcelo Tosatti 
13615102ee87STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
13625102ee87STony Krowiak {
13639d8d5786SMichael Mueller 	if (!test_kvm_facility(vcpu->kvm, 76))
13645102ee87STony Krowiak 		return;
13655102ee87STony Krowiak 
1366a374e892STony Krowiak 	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1367a374e892STony Krowiak 
1368a374e892STony Krowiak 	if (vcpu->kvm->arch.crypto.aes_kw)
1369a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1370a374e892STony Krowiak 	if (vcpu->kvm->arch.crypto.dea_kw)
1371a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1372a374e892STony Krowiak 
13735102ee87STony Krowiak 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
13745102ee87STony Krowiak }
13755102ee87STony Krowiak 
1376b31605c1SDominik Dingel void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1377b31605c1SDominik Dingel {
1378b31605c1SDominik Dingel 	free_page(vcpu->arch.sie_block->cbrlo);
1379b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = 0;
1380b31605c1SDominik Dingel }
1381b31605c1SDominik Dingel 
1382b31605c1SDominik Dingel int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1383b31605c1SDominik Dingel {
1384b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1385b31605c1SDominik Dingel 	if (!vcpu->arch.sie_block->cbrlo)
1386b31605c1SDominik Dingel 		return -ENOMEM;
1387b31605c1SDominik Dingel 
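	/*
	 * cbrlo points to the page used for CMMA; the ecb2 bits flipped below
	 * are assumed to enable CMMA interpretation (0x80) and to clear the
	 * PFMF interpretation control (0x08).
	 */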
1388b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 |= 0x80;
1389b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 &= ~0x08;
1390b31605c1SDominik Dingel 	return 0;
1391b31605c1SDominik Dingel }
1392b31605c1SDominik Dingel 
139391520f1aSMichael Mueller static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
139491520f1aSMichael Mueller {
139591520f1aSMichael Mueller 	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
139691520f1aSMichael Mueller 
139791520f1aSMichael Mueller 	vcpu->arch.cpu_id = model->cpu_id;
139891520f1aSMichael Mueller 	vcpu->arch.sie_block->ibc = model->ibc;
139991520f1aSMichael Mueller 	vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
140091520f1aSMichael Mueller }
140191520f1aSMichael Mueller 
1402b0c632dbSHeiko Carstens int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1403b0c632dbSHeiko Carstens {
1404b31605c1SDominik Dingel 	int rc = 0;
1405b31288faSKonstantin Weitz 
14069e6dabefSCornelia Huck 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
14079e6dabefSCornelia Huck 						    CPUSTAT_SM |
1408a4a4f191SGuenther Hutzl 						    CPUSTAT_STOPPED);
1409a4a4f191SGuenther Hutzl 
141053df84f8SGuenther Hutzl 	if (test_kvm_facility(vcpu->kvm, 78))
141153df84f8SGuenther Hutzl 		atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
141253df84f8SGuenther Hutzl 	else if (test_kvm_facility(vcpu->kvm, 8))
1413a4a4f191SGuenther Hutzl 		atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
1414a4a4f191SGuenther Hutzl 
141591520f1aSMichael Mueller 	kvm_s390_vcpu_setup_model(vcpu);
141691520f1aSMichael Mueller 
1417fc34531dSChristian Borntraeger 	vcpu->arch.sie_block->ecb   = 6;
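	/*
	 * Facilities 50/73 relate to (constrained) transactional execution;
	 * ecb bit 0x10 is assumed to let the guest use it under SIE.
	 */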
14189d8d5786SMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
14197feb6bb8SMichael Mueller 		vcpu->arch.sie_block->ecb |= 0x10;
14207feb6bb8SMichael Mueller 
142169d0d3a3SChristian Borntraeger 	vcpu->arch.sie_block->ecb2  = 8;
1422ea5f4969SDavid Hildenbrand 	vcpu->arch.sie_block->eca   = 0xC1002000U;
142337c5f6c8SDavid Hildenbrand 	if (sclp.has_siif)
1424217a4406SHeiko Carstens 		vcpu->arch.sie_block->eca |= 1;
142537c5f6c8SDavid Hildenbrand 	if (sclp.has_sigpif)
1426ea5f4969SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= 0x10000000U;
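	/*
	 * Facility 129 is the vector facility; the eca/ecd bits below are
	 * assumed to enable vector register interpretation in SIE.
	 */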
142718280d8bSMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 129)) {
142813211ea7SEric Farman 		vcpu->arch.sie_block->eca |= 0x00020000;
142913211ea7SEric Farman 		vcpu->arch.sie_block->ecd |= 0x20000000;
143013211ea7SEric Farman 	}
1431492d8642SThomas Huth 	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
14325a5e6536SMatthew Rosato 
1433e6db1d61SDominik Dingel 	if (vcpu->kvm->arch.use_cmma) {
1434b31605c1SDominik Dingel 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
1435b31605c1SDominik Dingel 		if (rc)
1436b31605c1SDominik Dingel 			return rc;
1437b31288faSKonstantin Weitz 	}
14380ac96cafSDavid Hildenbrand 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1439ca872302SChristian Borntraeger 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
14409d8d5786SMichael Mueller 
14415102ee87STony Krowiak 	kvm_s390_vcpu_crypto_setup(vcpu);
14425102ee87STony Krowiak 
1443b31605c1SDominik Dingel 	return rc;
1444b0c632dbSHeiko Carstens }
1445b0c632dbSHeiko Carstens 
1446b0c632dbSHeiko Carstens struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1447b0c632dbSHeiko Carstens 				      unsigned int id)
1448b0c632dbSHeiko Carstens {
14494d47555aSCarsten Otte 	struct kvm_vcpu *vcpu;
14507feb6bb8SMichael Mueller 	struct sie_page *sie_page;
14514d47555aSCarsten Otte 	int rc = -EINVAL;
1452b0c632dbSHeiko Carstens 
14534d47555aSCarsten Otte 	if (id >= KVM_MAX_VCPUS)
14544d47555aSCarsten Otte 		goto out;
14554d47555aSCarsten Otte 
14564d47555aSCarsten Otte 	rc = -ENOMEM;
14574d47555aSCarsten Otte 
1458b110feafSMichael Mueller 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
1459b0c632dbSHeiko Carstens 	if (!vcpu)
14604d47555aSCarsten Otte 		goto out;
1461b0c632dbSHeiko Carstens 
14627feb6bb8SMichael Mueller 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
14637feb6bb8SMichael Mueller 	if (!sie_page)
1464b0c632dbSHeiko Carstens 		goto out_free_cpu;
1465b0c632dbSHeiko Carstens 
14667feb6bb8SMichael Mueller 	vcpu->arch.sie_block = &sie_page->sie_block;
14677feb6bb8SMichael Mueller 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
146868c55750SEric Farman 	vcpu->arch.host_vregs = &sie_page->vregs;
14697feb6bb8SMichael Mueller 
1470b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icpua = id;
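	/*
	 * Wire the VCPU into the system control area: sda holds the SIE block
	 * address for this CPU, scaoh/scaol the high/low halves of the SCA
	 * address, and the mcn bit marks the entry as valid (interpretation
	 * assumed from the field names).
	 */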
147158f9460bSCarsten Otte 	if (!kvm_is_ucontrol(kvm)) {
147258f9460bSCarsten Otte 		if (!kvm->arch.sca) {
147358f9460bSCarsten Otte 			WARN_ON_ONCE(1);
147458f9460bSCarsten Otte 			goto out_free_cpu;
147558f9460bSCarsten Otte 		}
1476abf4a71eSCarsten Otte 		if (!kvm->arch.sca->cpu[id].sda)
147758f9460bSCarsten Otte 			kvm->arch.sca->cpu[id].sda =
147858f9460bSCarsten Otte 				(__u64) vcpu->arch.sie_block;
147958f9460bSCarsten Otte 		vcpu->arch.sie_block->scaoh =
148058f9460bSCarsten Otte 			(__u32)(((__u64)kvm->arch.sca) >> 32);
1481b0c632dbSHeiko Carstens 		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
1482fc34531dSChristian Borntraeger 		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
148358f9460bSCarsten Otte 	}
1484b0c632dbSHeiko Carstens 
1485ba5c1e9bSCarsten Otte 	spin_lock_init(&vcpu->arch.local_int.lock);
1486ba5c1e9bSCarsten Otte 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
1487d0321a24SChristian Borntraeger 	vcpu->arch.local_int.wq = &vcpu->wq;
14885288fbf0SChristian Borntraeger 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
1489ba5c1e9bSCarsten Otte 
1490b0c632dbSHeiko Carstens 	rc = kvm_vcpu_init(vcpu, kvm, id);
1491b0c632dbSHeiko Carstens 	if (rc)
14927b06bf2fSWei Yongjun 		goto out_free_sie_block;
1493b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
1494b0c632dbSHeiko Carstens 		 vcpu->arch.sie_block);
1495ade38c31SCornelia Huck 	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
1496b0c632dbSHeiko Carstens 
1497b0c632dbSHeiko Carstens 	return vcpu;
14987b06bf2fSWei Yongjun out_free_sie_block:
14997b06bf2fSWei Yongjun 	free_page((unsigned long)(vcpu->arch.sie_block));
1500b0c632dbSHeiko Carstens out_free_cpu:
1501b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
15024d47555aSCarsten Otte out:
1503b0c632dbSHeiko Carstens 	return ERR_PTR(rc);
1504b0c632dbSHeiko Carstens }
1505b0c632dbSHeiko Carstens 
1506b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1507b0c632dbSHeiko Carstens {
15089a022067SDavid Hildenbrand 	return kvm_s390_vcpu_has_irq(vcpu, 0);
1509b0c632dbSHeiko Carstens }
1510b0c632dbSHeiko Carstens 
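/*
 * prog20 is assumed to be evaluated on SIE entry: PROG_BLOCK_SIE keeps the
 * VCPU from (re-)entering SIE, PROG_REQUEST flags a pending synchronous
 * request; exit_sie() below kicks a VCPU that is currently running in SIE.
 */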
151127406cd5SChristian Borntraeger void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
151249b99e1eSChristian Borntraeger {
151349b99e1eSChristian Borntraeger 	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
151461a6df54SDavid Hildenbrand 	exit_sie(vcpu);
151549b99e1eSChristian Borntraeger }
151649b99e1eSChristian Borntraeger 
151727406cd5SChristian Borntraeger void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
151849b99e1eSChristian Borntraeger {
151949b99e1eSChristian Borntraeger 	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
152049b99e1eSChristian Borntraeger }
152149b99e1eSChristian Borntraeger 
15228e236546SChristian Borntraeger static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
15238e236546SChristian Borntraeger {
15248e236546SChristian Borntraeger 	atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
152561a6df54SDavid Hildenbrand 	exit_sie(vcpu);
15268e236546SChristian Borntraeger }
15278e236546SChristian Borntraeger 
15288e236546SChristian Borntraeger static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
15298e236546SChristian Borntraeger {
15308e236546SChristian Borntraeger 	atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
15318e236546SChristian Borntraeger }
15328e236546SChristian Borntraeger 
153349b99e1eSChristian Borntraeger /*
153449b99e1eSChristian Borntraeger  * Kick a guest cpu out of SIE and wait until SIE is not running.
153549b99e1eSChristian Borntraeger  * If the CPU is not running (e.g. waiting as idle) the function will
153649b99e1eSChristian Borntraeger  * return immediately. */
153749b99e1eSChristian Borntraeger void exit_sie(struct kvm_vcpu *vcpu)
153849b99e1eSChristian Borntraeger {
153949b99e1eSChristian Borntraeger 	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
154049b99e1eSChristian Borntraeger 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
154149b99e1eSChristian Borntraeger 		cpu_relax();
154249b99e1eSChristian Borntraeger }
154349b99e1eSChristian Borntraeger 
15448e236546SChristian Borntraeger /* Kick a guest cpu out of SIE to process a request synchronously */
15458e236546SChristian Borntraeger void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
154649b99e1eSChristian Borntraeger {
15478e236546SChristian Borntraeger 	kvm_make_request(req, vcpu);
15488e236546SChristian Borntraeger 	kvm_s390_vcpu_request(vcpu);
154949b99e1eSChristian Borntraeger }
155049b99e1eSChristian Borntraeger 
15512c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
15522c70fe44SChristian Borntraeger {
15532c70fe44SChristian Borntraeger 	int i;
15542c70fe44SChristian Borntraeger 	struct kvm *kvm = gmap->private;
15552c70fe44SChristian Borntraeger 	struct kvm_vcpu *vcpu;
15562c70fe44SChristian Borntraeger 
15572c70fe44SChristian Borntraeger 	kvm_for_each_vcpu(i, vcpu, kvm) {
15582c70fe44SChristian Borntraeger 		/* match against both prefix pages */
1559fda902cbSMichael Mueller 		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
15602c70fe44SChristian Borntraeger 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
15618e236546SChristian Borntraeger 			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
15622c70fe44SChristian Borntraeger 		}
15632c70fe44SChristian Borntraeger 	}
15642c70fe44SChristian Borntraeger }
15652c70fe44SChristian Borntraeger 
1566b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1567b6d33834SChristoffer Dall {
1568b6d33834SChristoffer Dall 	/* kvm common code refers to this, but never calls it */
1569b6d33834SChristoffer Dall 	BUG();
1570b6d33834SChristoffer Dall 	return 0;
1571b6d33834SChristoffer Dall }
1572b6d33834SChristoffer Dall 
157314eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
157414eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
157514eebd91SCarsten Otte {
157614eebd91SCarsten Otte 	int r = -EINVAL;
157714eebd91SCarsten Otte 
157814eebd91SCarsten Otte 	switch (reg->id) {
157929b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
158029b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->todpr,
158129b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
158229b7c71bSCarsten Otte 		break;
158329b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
158429b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->epoch,
158529b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
158629b7c71bSCarsten Otte 		break;
158746a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
158846a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->cputm,
158946a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
159046a6dd1cSJason J. herne 		break;
159146a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
159246a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->ckc,
159346a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
159446a6dd1cSJason J. herne 		break;
1595536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
1596536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_token,
1597536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1598536336c2SDominik Dingel 		break;
1599536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
1600536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_compare,
1601536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1602536336c2SDominik Dingel 		break;
1603536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
1604536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_select,
1605536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1606536336c2SDominik Dingel 		break;
1607672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
1608672550fbSChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->pp,
1609672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
1610672550fbSChristian Borntraeger 		break;
1611afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
1612afa45ff5SChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->gbea,
1613afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
1614afa45ff5SChristian Borntraeger 		break;
161514eebd91SCarsten Otte 	default:
161614eebd91SCarsten Otte 		break;
161714eebd91SCarsten Otte 	}
161814eebd91SCarsten Otte 
161914eebd91SCarsten Otte 	return r;
162014eebd91SCarsten Otte }
162114eebd91SCarsten Otte 
162214eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
162314eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
162414eebd91SCarsten Otte {
162514eebd91SCarsten Otte 	int r = -EINVAL;
162614eebd91SCarsten Otte 
162714eebd91SCarsten Otte 	switch (reg->id) {
162829b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
162929b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->todpr,
163029b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
163129b7c71bSCarsten Otte 		break;
163229b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
163329b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->epoch,
163429b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
163529b7c71bSCarsten Otte 		break;
163646a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
163746a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->cputm,
163846a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
163946a6dd1cSJason J. herne 		break;
164046a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
164146a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->ckc,
164246a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
164346a6dd1cSJason J. herne 		break;
1644536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
1645536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_token,
1646536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
16479fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
16489fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
1649536336c2SDominik Dingel 		break;
1650536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
1651536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_compare,
1652536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1653536336c2SDominik Dingel 		break;
1654536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
1655536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_select,
1656536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1657536336c2SDominik Dingel 		break;
1658672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
1659672550fbSChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->pp,
1660672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
1661672550fbSChristian Borntraeger 		break;
1662afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
1663afa45ff5SChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->gbea,
1664afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
1665afa45ff5SChristian Borntraeger 		break;
166614eebd91SCarsten Otte 	default:
166714eebd91SCarsten Otte 		break;
166814eebd91SCarsten Otte 	}
166914eebd91SCarsten Otte 
167014eebd91SCarsten Otte 	return r;
167114eebd91SCarsten Otte }
1672b6d33834SChristoffer Dall 
1673b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
1674b0c632dbSHeiko Carstens {
1675b0c632dbSHeiko Carstens 	kvm_s390_vcpu_initial_reset(vcpu);
1676b0c632dbSHeiko Carstens 	return 0;
1677b0c632dbSHeiko Carstens }
1678b0c632dbSHeiko Carstens 
1679b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1680b0c632dbSHeiko Carstens {
16815a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
1682b0c632dbSHeiko Carstens 	return 0;
1683b0c632dbSHeiko Carstens }
1684b0c632dbSHeiko Carstens 
1685b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1686b0c632dbSHeiko Carstens {
16875a32c1afSChristian Borntraeger 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
1688b0c632dbSHeiko Carstens 	return 0;
1689b0c632dbSHeiko Carstens }
1690b0c632dbSHeiko Carstens 
1691b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1692b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
1693b0c632dbSHeiko Carstens {
169459674c1aSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
1695b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
169659674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
1697b0c632dbSHeiko Carstens 	return 0;
1698b0c632dbSHeiko Carstens }
1699b0c632dbSHeiko Carstens 
1700b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1701b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
1702b0c632dbSHeiko Carstens {
170359674c1aSChristian Borntraeger 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
1704b0c632dbSHeiko Carstens 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
1705b0c632dbSHeiko Carstens 	return 0;
1706b0c632dbSHeiko Carstens }
1707b0c632dbSHeiko Carstens 
1708b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1709b0c632dbSHeiko Carstens {
17104725c860SMartin Schwidefsky 	if (test_fp_ctl(fpu->fpc))
17114725c860SMartin Schwidefsky 		return -EINVAL;
1712b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
17134725c860SMartin Schwidefsky 	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
17144725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
17154725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
1716b0c632dbSHeiko Carstens 	return 0;
1717b0c632dbSHeiko Carstens }
1718b0c632dbSHeiko Carstens 
1719b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1720b0c632dbSHeiko Carstens {
1721b0c632dbSHeiko Carstens 	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
1722b0c632dbSHeiko Carstens 	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
1723b0c632dbSHeiko Carstens 	return 0;
1724b0c632dbSHeiko Carstens }
1725b0c632dbSHeiko Carstens 
1726b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1727b0c632dbSHeiko Carstens {
1728b0c632dbSHeiko Carstens 	int rc = 0;
1729b0c632dbSHeiko Carstens 
17307a42fdc2SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
1731b0c632dbSHeiko Carstens 		rc = -EBUSY;
1732d7b0b5ebSCarsten Otte 	else {
1733d7b0b5ebSCarsten Otte 		vcpu->run->psw_mask = psw.mask;
1734d7b0b5ebSCarsten Otte 		vcpu->run->psw_addr = psw.addr;
1735d7b0b5ebSCarsten Otte 	}
1736b0c632dbSHeiko Carstens 	return rc;
1737b0c632dbSHeiko Carstens }
1738b0c632dbSHeiko Carstens 
1739b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1740b0c632dbSHeiko Carstens 				  struct kvm_translation *tr)
1741b0c632dbSHeiko Carstens {
1742b0c632dbSHeiko Carstens 	return -EINVAL; /* not implemented yet */
1743b0c632dbSHeiko Carstens }
1744b0c632dbSHeiko Carstens 
174527291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
174627291e21SDavid Hildenbrand 			      KVM_GUESTDBG_USE_HW_BP | \
174727291e21SDavid Hildenbrand 			      KVM_GUESTDBG_ENABLE)
174827291e21SDavid Hildenbrand 
1749d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1750d0bfb940SJan Kiszka 					struct kvm_guest_debug *dbg)
1751b0c632dbSHeiko Carstens {
175227291e21SDavid Hildenbrand 	int rc = 0;
175327291e21SDavid Hildenbrand 
175427291e21SDavid Hildenbrand 	vcpu->guest_debug = 0;
175527291e21SDavid Hildenbrand 	kvm_s390_clear_bp_data(vcpu);
175627291e21SDavid Hildenbrand 
17572de3bfc2SDavid Hildenbrand 	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
175827291e21SDavid Hildenbrand 		return -EINVAL;
175927291e21SDavid Hildenbrand 
176027291e21SDavid Hildenbrand 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
176127291e21SDavid Hildenbrand 		vcpu->guest_debug = dbg->control;
176227291e21SDavid Hildenbrand 		/* enforce guest PER */
176327291e21SDavid Hildenbrand 		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
176427291e21SDavid Hildenbrand 
176527291e21SDavid Hildenbrand 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
176627291e21SDavid Hildenbrand 			rc = kvm_s390_import_bp_data(vcpu, dbg);
176727291e21SDavid Hildenbrand 	} else {
176827291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
176927291e21SDavid Hildenbrand 		vcpu->arch.guestdbg.last_bp = 0;
177027291e21SDavid Hildenbrand 	}
177127291e21SDavid Hildenbrand 
177227291e21SDavid Hildenbrand 	if (rc) {
177327291e21SDavid Hildenbrand 		vcpu->guest_debug = 0;
177427291e21SDavid Hildenbrand 		kvm_s390_clear_bp_data(vcpu);
177527291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
177627291e21SDavid Hildenbrand 	}
177727291e21SDavid Hildenbrand 
177827291e21SDavid Hildenbrand 	return rc;
1779b0c632dbSHeiko Carstens }
1780b0c632dbSHeiko Carstens 
178162d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
178262d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
178362d9f0dbSMarcelo Tosatti {
17846352e4d2SDavid Hildenbrand 	/* CHECK_STOP and LOAD are not supported yet */
17856352e4d2SDavid Hildenbrand 	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
17866352e4d2SDavid Hildenbrand 				       KVM_MP_STATE_OPERATING;
178762d9f0dbSMarcelo Tosatti }
178862d9f0dbSMarcelo Tosatti 
178962d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
179062d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
179162d9f0dbSMarcelo Tosatti {
17926352e4d2SDavid Hildenbrand 	int rc = 0;
17936352e4d2SDavid Hildenbrand 
17946352e4d2SDavid Hildenbrand 	/* user space knows about this interface - let it control the state */
17956352e4d2SDavid Hildenbrand 	vcpu->kvm->arch.user_cpu_state_ctrl = 1;
17966352e4d2SDavid Hildenbrand 
17976352e4d2SDavid Hildenbrand 	switch (mp_state->mp_state) {
17986352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_STOPPED:
17996352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
18006352e4d2SDavid Hildenbrand 		break;
18016352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_OPERATING:
18026352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
18036352e4d2SDavid Hildenbrand 		break;
18046352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_LOAD:
18056352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_CHECK_STOP:
18066352e4d2SDavid Hildenbrand 		/* fall through - CHECK_STOP and LOAD are not supported yet */
18076352e4d2SDavid Hildenbrand 	default:
18086352e4d2SDavid Hildenbrand 		rc = -ENXIO;
18096352e4d2SDavid Hildenbrand 	}
18106352e4d2SDavid Hildenbrand 
18116352e4d2SDavid Hildenbrand 	return rc;
181262d9f0dbSMarcelo Tosatti }
181362d9f0dbSMarcelo Tosatti 
18148ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu)
18158ad35755SDavid Hildenbrand {
18168ad35755SDavid Hildenbrand 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
18178ad35755SDavid Hildenbrand }
18188ad35755SDavid Hildenbrand 
18192c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
18202c70fe44SChristian Borntraeger {
1821785dbef4SChristian Borntraeger 	if (!vcpu->requests)
1822785dbef4SChristian Borntraeger 		return 0;
18238ad35755SDavid Hildenbrand retry:
18248e236546SChristian Borntraeger 	kvm_s390_vcpu_request_handled(vcpu);
18252c70fe44SChristian Borntraeger 	/*
18262c70fe44SChristian Borntraeger 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
18272c70fe44SChristian Borntraeger 	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
18282c70fe44SChristian Borntraeger 	 * This ensures that the ipte instruction for this request has
18292c70fe44SChristian Borntraeger 	 * already finished. We might race against a second unmapper that
18302c70fe44SChristian Borntraeger 	 * wants to set the blocking bit. Let's just retry the request loop.
18312c70fe44SChristian Borntraeger 	 */
18328ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
18332c70fe44SChristian Borntraeger 		int rc;
18342c70fe44SChristian Borntraeger 		rc = gmap_ipte_notify(vcpu->arch.gmap,
1835fda902cbSMichael Mueller 				      kvm_s390_get_prefix(vcpu),
18362c70fe44SChristian Borntraeger 				      PAGE_SIZE * 2);
18372c70fe44SChristian Borntraeger 		if (rc)
18382c70fe44SChristian Borntraeger 			return rc;
18398ad35755SDavid Hildenbrand 		goto retry;
18402c70fe44SChristian Borntraeger 	}
18418ad35755SDavid Hildenbrand 
1842d3d692c8SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
1843d3d692c8SDavid Hildenbrand 		vcpu->arch.sie_block->ihcpu = 0xffff;
1844d3d692c8SDavid Hildenbrand 		goto retry;
1845d3d692c8SDavid Hildenbrand 	}
1846d3d692c8SDavid Hildenbrand 
18478ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
18488ad35755SDavid Hildenbrand 		if (!ibs_enabled(vcpu)) {
18498ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
18508ad35755SDavid Hildenbrand 			atomic_set_mask(CPUSTAT_IBS,
18518ad35755SDavid Hildenbrand 					&vcpu->arch.sie_block->cpuflags);
18528ad35755SDavid Hildenbrand 		}
18538ad35755SDavid Hildenbrand 		goto retry;
18548ad35755SDavid Hildenbrand 	}
18558ad35755SDavid Hildenbrand 
18568ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
18578ad35755SDavid Hildenbrand 		if (ibs_enabled(vcpu)) {
18588ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
18598ad35755SDavid Hildenbrand 			atomic_clear_mask(CPUSTAT_IBS,
18608ad35755SDavid Hildenbrand 					  &vcpu->arch.sie_block->cpuflags);
18618ad35755SDavid Hildenbrand 		}
18628ad35755SDavid Hildenbrand 		goto retry;
18638ad35755SDavid Hildenbrand 	}
18648ad35755SDavid Hildenbrand 
18650759d068SDavid Hildenbrand 	/* nothing to do, just clear the request */
18660759d068SDavid Hildenbrand 	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
18670759d068SDavid Hildenbrand 
18682c70fe44SChristian Borntraeger 	return 0;
18692c70fe44SChristian Borntraeger }
18702c70fe44SChristian Borntraeger 
1871fa576c58SThomas Huth /**
1872fa576c58SThomas Huth  * kvm_arch_fault_in_page - fault-in guest page if necessary
1873fa576c58SThomas Huth  * @vcpu: The corresponding virtual cpu
1874fa576c58SThomas Huth  * @gpa: Guest physical address
1875fa576c58SThomas Huth  * @writable: Whether the page should be writable or not
1876fa576c58SThomas Huth  *
1877fa576c58SThomas Huth  * Make sure that a guest page has been faulted-in on the host.
1878fa576c58SThomas Huth  *
1879fa576c58SThomas Huth  * Return: Zero on success, negative error code otherwise.
1880fa576c58SThomas Huth  */
1881fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
188224eb3a82SDominik Dingel {
1883527e30b4SMartin Schwidefsky 	return gmap_fault(vcpu->arch.gmap, gpa,
1884527e30b4SMartin Schwidefsky 			  writable ? FAULT_FLAG_WRITE : 0);
188524eb3a82SDominik Dingel }
188624eb3a82SDominik Dingel 
18873c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
18883c038e6bSDominik Dingel 				      unsigned long token)
18893c038e6bSDominik Dingel {
18903c038e6bSDominik Dingel 	struct kvm_s390_interrupt inti;
1891383d0b05SJens Freimann 	struct kvm_s390_irq irq;
18923c038e6bSDominik Dingel 
18933c038e6bSDominik Dingel 	if (start_token) {
1894383d0b05SJens Freimann 		irq.u.ext.ext_params2 = token;
1895383d0b05SJens Freimann 		irq.type = KVM_S390_INT_PFAULT_INIT;
1896383d0b05SJens Freimann 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
18973c038e6bSDominik Dingel 	} else {
18983c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_DONE;
1899383d0b05SJens Freimann 		inti.parm64 = token;
19003c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
19013c038e6bSDominik Dingel 	}
19023c038e6bSDominik Dingel }
19033c038e6bSDominik Dingel 
19043c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
19053c038e6bSDominik Dingel 				     struct kvm_async_pf *work)
19063c038e6bSDominik Dingel {
19073c038e6bSDominik Dingel 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
19083c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
19093c038e6bSDominik Dingel }
19103c038e6bSDominik Dingel 
19113c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
19123c038e6bSDominik Dingel 				 struct kvm_async_pf *work)
19133c038e6bSDominik Dingel {
19143c038e6bSDominik Dingel 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
19153c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
19163c038e6bSDominik Dingel }
19173c038e6bSDominik Dingel 
19183c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
19193c038e6bSDominik Dingel 			       struct kvm_async_pf *work)
19203c038e6bSDominik Dingel {
19213c038e6bSDominik Dingel 	/* s390 will always inject the page directly */
19223c038e6bSDominik Dingel }
19233c038e6bSDominik Dingel 
19243c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
19253c038e6bSDominik Dingel {
19263c038e6bSDominik Dingel 	/*
19273c038e6bSDominik Dingel 	 * s390 will always inject the page directly,
19283c038e6bSDominik Dingel 	 * but we still want check_async_completion to clean up
19293c038e6bSDominik Dingel 	 */
19303c038e6bSDominik Dingel 	return true;
19313c038e6bSDominik Dingel }
19323c038e6bSDominik Dingel 
19333c038e6bSDominik Dingel static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
19343c038e6bSDominik Dingel {
19353c038e6bSDominik Dingel 	hva_t hva;
19363c038e6bSDominik Dingel 	struct kvm_arch_async_pf arch;
19373c038e6bSDominik Dingel 	int rc;
19383c038e6bSDominik Dingel 
19393c038e6bSDominik Dingel 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
19403c038e6bSDominik Dingel 		return 0;
19413c038e6bSDominik Dingel 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
19423c038e6bSDominik Dingel 	    vcpu->arch.pfault_compare)
19433c038e6bSDominik Dingel 		return 0;
19443c038e6bSDominik Dingel 	if (psw_extint_disabled(vcpu))
19453c038e6bSDominik Dingel 		return 0;
19469a022067SDavid Hildenbrand 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
19473c038e6bSDominik Dingel 		return 0;
19483c038e6bSDominik Dingel 	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
19493c038e6bSDominik Dingel 		return 0;
19503c038e6bSDominik Dingel 	if (!vcpu->arch.gmap->pfault_enabled)
19513c038e6bSDominik Dingel 		return 0;
19523c038e6bSDominik Dingel 
195381480cc1SHeiko Carstens 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
195481480cc1SHeiko Carstens 	hva += current->thread.gmap_addr & ~PAGE_MASK;
195581480cc1SHeiko Carstens 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
19563c038e6bSDominik Dingel 		return 0;
19573c038e6bSDominik Dingel 
19583c038e6bSDominik Dingel 	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
19593c038e6bSDominik Dingel 	return rc;
19603c038e6bSDominik Dingel }
19613c038e6bSDominik Dingel 
19623fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu)
1963b0c632dbSHeiko Carstens {
19643fb4c40fSThomas Huth 	int rc, cpuflags;
1965e168bf8dSCarsten Otte 
19663c038e6bSDominik Dingel 	/*
19673c038e6bSDominik Dingel 	 * On s390 notifications for arriving pages will be delivered directly
19683c038e6bSDominik Dingel 	 * to the guest, but the housekeeping for completed pfaults is
19693c038e6bSDominik Dingel 	 * handled outside the worker.
19703c038e6bSDominik Dingel 	 */
19713c038e6bSDominik Dingel 	kvm_check_async_pf_completion(vcpu);
19723c038e6bSDominik Dingel 
19735a32c1afSChristian Borntraeger 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
1974b0c632dbSHeiko Carstens 
1975b0c632dbSHeiko Carstens 	if (need_resched())
1976b0c632dbSHeiko Carstens 		schedule();
1977b0c632dbSHeiko Carstens 
1978d3a73acbSMartin Schwidefsky 	if (test_cpu_flag(CIF_MCCK_PENDING))
197971cde587SChristian Borntraeger 		s390_handle_mcck();
198071cde587SChristian Borntraeger 
198179395031SJens Freimann 	if (!kvm_is_ucontrol(vcpu->kvm)) {
198279395031SJens Freimann 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
198379395031SJens Freimann 		if (rc)
198479395031SJens Freimann 			return rc;
198579395031SJens Freimann 	}
19860ff31867SCarsten Otte 
19872c70fe44SChristian Borntraeger 	rc = kvm_s390_handle_requests(vcpu);
19882c70fe44SChristian Borntraeger 	if (rc)
19892c70fe44SChristian Borntraeger 		return rc;
19902c70fe44SChristian Borntraeger 
199127291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu)) {
199227291e21SDavid Hildenbrand 		kvm_s390_backup_guest_per_regs(vcpu);
199327291e21SDavid Hildenbrand 		kvm_s390_patch_guest_per_regs(vcpu);
199427291e21SDavid Hildenbrand 	}
199527291e21SDavid Hildenbrand 
1996b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icptcode = 0;
19973fb4c40fSThomas Huth 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
19983fb4c40fSThomas Huth 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
19993fb4c40fSThomas Huth 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
20002b29a9fdSDominik Dingel 
20013fb4c40fSThomas Huth 	return 0;
20023fb4c40fSThomas Huth }
20033fb4c40fSThomas Huth 
2004492d8642SThomas Huth static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
2005492d8642SThomas Huth {
2006492d8642SThomas Huth 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
2007492d8642SThomas Huth 	u8 opcode;
2008492d8642SThomas Huth 	int rc;
2009492d8642SThomas Huth 
2010492d8642SThomas Huth 	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
2011492d8642SThomas Huth 	trace_kvm_s390_sie_fault(vcpu);
2012492d8642SThomas Huth 
2013492d8642SThomas Huth 	/*
2014492d8642SThomas Huth 	 * We want to inject an addressing exception, which is defined as a
2015492d8642SThomas Huth 	 * suppressing or terminating exception. However, since we came here
2016492d8642SThomas Huth 	 * by a DAT access exception, the PSW still points to the faulting
2017492d8642SThomas Huth 	 * instruction, because DAT exceptions are nullifying. So we've got
2018492d8642SThomas Huth 	 * to look up the current opcode to get the length of the instruction
2019492d8642SThomas Huth 	 * to be able to forward the PSW.
2020492d8642SThomas Huth 	 */
20218ae04b8fSAlexander Yarygin 	rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
2022492d8642SThomas Huth 	if (rc)
2023492d8642SThomas Huth 		return kvm_s390_inject_prog_cond(vcpu, rc);
2024492d8642SThomas Huth 	psw->addr = __rewind_psw(*psw, -insn_length(opcode));
2025492d8642SThomas Huth 
2026492d8642SThomas Huth 	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
2027492d8642SThomas Huth }
2028492d8642SThomas Huth 
20293fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
20303fb4c40fSThomas Huth {
203124eb3a82SDominik Dingel 	int rc = -1;
20322b29a9fdSDominik Dingel 
20332b29a9fdSDominik Dingel 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
20342b29a9fdSDominik Dingel 		   vcpu->arch.sie_block->icptcode);
20352b29a9fdSDominik Dingel 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
20362b29a9fdSDominik Dingel 
203727291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu))
203827291e21SDavid Hildenbrand 		kvm_s390_restore_guest_per_regs(vcpu);
203927291e21SDavid Hildenbrand 
20403fb4c40fSThomas Huth 	if (exit_reason >= 0) {
20417c470539SMartin Schwidefsky 		rc = 0;
2042210b1607SThomas Huth 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
2043210b1607SThomas Huth 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2044210b1607SThomas Huth 		vcpu->run->s390_ucontrol.trans_exc_code =
2045210b1607SThomas Huth 						current->thread.gmap_addr;
2046210b1607SThomas Huth 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
2047210b1607SThomas Huth 		rc = -EREMOTE;
204824eb3a82SDominik Dingel 
204924eb3a82SDominik Dingel 	} else if (current->thread.gmap_pfault) {
20503c038e6bSDominik Dingel 		trace_kvm_s390_major_guest_pfault(vcpu);
205124eb3a82SDominik Dingel 		current->thread.gmap_pfault = 0;
2052fa576c58SThomas Huth 		if (kvm_arch_setup_async_pf(vcpu)) {
205324eb3a82SDominik Dingel 			rc = 0;
2054fa576c58SThomas Huth 		} else {
2055fa576c58SThomas Huth 			gpa_t gpa = current->thread.gmap_addr;
2056fa576c58SThomas Huth 			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
2057fa576c58SThomas Huth 		}
205824eb3a82SDominik Dingel 	}
205924eb3a82SDominik Dingel 
2060492d8642SThomas Huth 	if (rc == -1)
2061492d8642SThomas Huth 		rc = vcpu_post_run_fault_in_sie(vcpu);
2062b0c632dbSHeiko Carstens 
20635a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
20643fb4c40fSThomas Huth 
2065a76ccff6SThomas Huth 	if (rc == 0) {
2066a76ccff6SThomas Huth 		if (kvm_is_ucontrol(vcpu->kvm))
20672955c83fSChristian Borntraeger 			/* Don't exit for host interrupts. */
20682955c83fSChristian Borntraeger 			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
2069a76ccff6SThomas Huth 		else
2070a76ccff6SThomas Huth 			rc = kvm_handle_sie_intercept(vcpu);
2071a76ccff6SThomas Huth 	}
2072a76ccff6SThomas Huth 
20733fb4c40fSThomas Huth 	return rc;
20743fb4c40fSThomas Huth }
20753fb4c40fSThomas Huth 
20763fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu)
20773fb4c40fSThomas Huth {
20783fb4c40fSThomas Huth 	int rc, exit_reason;
20793fb4c40fSThomas Huth 
2080800c1065SThomas Huth 	/*
2081800c1065SThomas Huth 	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
2082800c1065SThomas Huth 	 * ning the guest), so that memslots (and other stuff) are protected
2083800c1065SThomas Huth 	 */
2084800c1065SThomas Huth 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2085800c1065SThomas Huth 
2086a76ccff6SThomas Huth 	do {
20873fb4c40fSThomas Huth 		rc = vcpu_pre_run(vcpu);
20883fb4c40fSThomas Huth 		if (rc)
2089a76ccff6SThomas Huth 			break;
20903fb4c40fSThomas Huth 
2091800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
20923fb4c40fSThomas Huth 		/*
2093a76ccff6SThomas Huth 		 * As PF_VCPU will be used in the fault handler, there must be
2094a76ccff6SThomas Huth 		 * no uaccess between guest_enter and guest_exit.
20953fb4c40fSThomas Huth 		 */
20960097d12eSChristian Borntraeger 		local_irq_disable();
20970097d12eSChristian Borntraeger 		__kvm_guest_enter();
20980097d12eSChristian Borntraeger 		local_irq_enable();
2099a76ccff6SThomas Huth 		exit_reason = sie64a(vcpu->arch.sie_block,
2100a76ccff6SThomas Huth 				     vcpu->run->s.regs.gprs);
21010097d12eSChristian Borntraeger 		local_irq_disable();
21020097d12eSChristian Borntraeger 		__kvm_guest_exit();
21030097d12eSChristian Borntraeger 		local_irq_enable();
2104800c1065SThomas Huth 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
21053fb4c40fSThomas Huth 
21063fb4c40fSThomas Huth 		rc = vcpu_post_run(vcpu, exit_reason);
210727291e21SDavid Hildenbrand 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
21083fb4c40fSThomas Huth 
2109800c1065SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
2110e168bf8dSCarsten Otte 	return rc;
2111b0c632dbSHeiko Carstens }
2112b0c632dbSHeiko Carstens 
2113b028ee3eSDavid Hildenbrand static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2114b028ee3eSDavid Hildenbrand {
2115b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2116b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2117b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2118b028ee3eSDavid Hildenbrand 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2119b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2120b028ee3eSDavid Hildenbrand 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
2121d3d692c8SDavid Hildenbrand 		/* some control register changes require a tlb flush */
2122d3d692c8SDavid Hildenbrand 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2123b028ee3eSDavid Hildenbrand 	}
2124b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
2125b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
2126b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2127b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2128b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2129b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2130b028ee3eSDavid Hildenbrand 	}
2131b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2132b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2133b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2134b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
21359fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
21369fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
2137b028ee3eSDavid Hildenbrand 	}
2138b028ee3eSDavid Hildenbrand 	kvm_run->kvm_dirty_regs = 0;
2139b028ee3eSDavid Hildenbrand }
2140b028ee3eSDavid Hildenbrand 
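/*
 * Counterpart of sync_regs(): copy the current guest register state back
 * into kvm_run so that userspace sees up-to-date values when the
 * KVM_RUN ioctl returns.
 */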
2141b028ee3eSDavid Hildenbrand static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2142b028ee3eSDavid Hildenbrand {
2143b028ee3eSDavid Hildenbrand 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2144b028ee3eSDavid Hildenbrand 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2145b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2146b028ee3eSDavid Hildenbrand 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
2147b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
2148b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2149b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2150b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2151b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2152b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2153b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2154b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2155b028ee3eSDavid Hildenbrand }
2156b028ee3eSDavid Hildenbrand 
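/*
 * Entry point for the KVM_RUN ioctl: sync register state from userspace,
 * run the guest until an exit that has to be handled in userspace, and
 * report the exit reason in kvm_run.
 */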
2157b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2158b0c632dbSHeiko Carstens {
21598f2abe6aSChristian Borntraeger 	int rc;
2160b0c632dbSHeiko Carstens 	sigset_t sigsaved;
2161b0c632dbSHeiko Carstens 
216227291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu)) {
216327291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
216427291e21SDavid Hildenbrand 		return 0;
216527291e21SDavid Hildenbrand 	}
216627291e21SDavid Hildenbrand 
2167b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
2168b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2169b0c632dbSHeiko Carstens 
21706352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
21716852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
21726352e4d2SDavid Hildenbrand 	} else if (is_vcpu_stopped(vcpu)) {
2173ea2cdd27SDavid Hildenbrand 		pr_err_ratelimited("can't run stopped vcpu %d\n",
21746352e4d2SDavid Hildenbrand 				   vcpu->vcpu_id);
21756352e4d2SDavid Hildenbrand 		return -EINVAL;
21766352e4d2SDavid Hildenbrand 	}
2177b0c632dbSHeiko Carstens 
2178b028ee3eSDavid Hildenbrand 	sync_regs(vcpu, kvm_run);
2179d7b0b5ebSCarsten Otte 
2180dab4079dSHeiko Carstens 	might_fault();
2181e168bf8dSCarsten Otte 	rc = __vcpu_run(vcpu);
21829ace903dSChristian Ehrhardt 
2183b1d16c49SChristian Ehrhardt 	if (signal_pending(current) && !rc) {
2184b1d16c49SChristian Ehrhardt 		kvm_run->exit_reason = KVM_EXIT_INTR;
21858f2abe6aSChristian Borntraeger 		rc = -EINTR;
2186b1d16c49SChristian Ehrhardt 	}
21878f2abe6aSChristian Borntraeger 
218827291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu) && !rc)  {
218927291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
219027291e21SDavid Hildenbrand 		rc = 0;
219127291e21SDavid Hildenbrand 	}
219227291e21SDavid Hildenbrand 
2193b8e660b8SHeiko Carstens 	if (rc == -EOPNOTSUPP) {
21948f2abe6aSChristian Borntraeger 		/* intercept cannot be handled in-kernel, prepare kvm_run */
21958f2abe6aSChristian Borntraeger 		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
21968f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
21978f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
21988f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
21998f2abe6aSChristian Borntraeger 		rc = 0;
22008f2abe6aSChristian Borntraeger 	}
22018f2abe6aSChristian Borntraeger 
22028f2abe6aSChristian Borntraeger 	if (rc == -EREMOTE) {
22038f2abe6aSChristian Borntraeger 		/* intercept was handled, but userspace support is needed;
22048f2abe6aSChristian Borntraeger 		 * kvm_run has been prepared by the handler */
22058f2abe6aSChristian Borntraeger 		rc = 0;
22068f2abe6aSChristian Borntraeger 	}
22078f2abe6aSChristian Borntraeger 
2208b028ee3eSDavid Hildenbrand 	store_regs(vcpu, kvm_run);
2209d7b0b5ebSCarsten Otte 
2210b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
2211b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2212b0c632dbSHeiko Carstens 
2213b0c632dbSHeiko Carstens 	vcpu->stat.exit_userspace++;
22147e8e6ab4SHeiko Carstens 	return rc;
2215b0c632dbSHeiko Carstens }
2216b0c632dbSHeiko Carstens 
2217b0c632dbSHeiko Carstens /*
2218b0c632dbSHeiko Carstens  * store status at address
2219b0c632dbSHeiko Carstens  * we have two special cases:
2220b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2221b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2222b0c632dbSHeiko Carstens  */
2223d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
2224b0c632dbSHeiko Carstens {
2225092670cdSCarsten Otte 	unsigned char archmode = 1;
2226fda902cbSMichael Mueller 	unsigned int px;
2227178bd789SThomas Huth 	u64 clkcomp;
2228d0bce605SHeiko Carstens 	int rc;
2229b0c632dbSHeiko Carstens 
2230d0bce605SHeiko Carstens 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2231d0bce605SHeiko Carstens 		if (write_guest_abs(vcpu, 163, &archmode, 1))
2232b0c632dbSHeiko Carstens 			return -EFAULT;
2233d0bce605SHeiko Carstens 		gpa = SAVE_AREA_BASE;
2234d0bce605SHeiko Carstens 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2235d0bce605SHeiko Carstens 		if (write_guest_real(vcpu, 163, &archmode, 1))
2236b0c632dbSHeiko Carstens 			return -EFAULT;
2237d0bce605SHeiko Carstens 		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
2238d0bce605SHeiko Carstens 	}
2239d0bce605SHeiko Carstens 	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
2240d0bce605SHeiko Carstens 			     vcpu->arch.guest_fpregs.fprs, 128);
2241d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
2242d0bce605SHeiko Carstens 			      vcpu->run->s.regs.gprs, 128);
2243d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
2244d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gpsw, 16);
2245fda902cbSMichael Mueller 	px = kvm_s390_get_prefix(vcpu);
2246d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
2247fda902cbSMichael Mueller 			      &px, 4);
2248d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu,
2249d0bce605SHeiko Carstens 			      gpa + offsetof(struct save_area, fp_ctrl_reg),
2250d0bce605SHeiko Carstens 			      &vcpu->arch.guest_fpregs.fpc, 4);
2251d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
2252d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->todpr, 4);
2253d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
2254d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->cputm, 8);
2255178bd789SThomas Huth 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
2256d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
2257d0bce605SHeiko Carstens 			      &clkcomp, 8);
2258d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
2259d0bce605SHeiko Carstens 			      &vcpu->run->s.regs.acrs, 64);
2260d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
2261d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gcr, 128);
2262d0bce605SHeiko Carstens 	return rc ? -EFAULT : 0;
2263b0c632dbSHeiko Carstens }
2264b0c632dbSHeiko Carstens 
2265e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2266e879892cSThomas Huth {
2267e879892cSThomas Huth 	/*
2268e879892cSThomas Huth 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2269e879892cSThomas Huth 	 * copying in vcpu load/put. Let's update our copies before we save
2270e879892cSThomas Huth 	 * them into the save area.
2271e879892cSThomas Huth 	 */
2272e879892cSThomas Huth 	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
2273e879892cSThomas Huth 	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
2274e879892cSThomas Huth 	save_access_regs(vcpu->run->s.regs.acrs);
2275e879892cSThomas Huth 
2276e879892cSThomas Huth 	return kvm_s390_store_status_unloaded(vcpu, addr);
2277e879892cSThomas Huth }
2278e879892cSThomas Huth 
2279bc17de7cSEric Farman /*
2280bc17de7cSEric Farman  * store additional status at address
2281bc17de7cSEric Farman  */
2282bc17de7cSEric Farman int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2283bc17de7cSEric Farman 					unsigned long gpa)
2284bc17de7cSEric Farman {
2285bc17de7cSEric Farman 	/* Only bits 0-53 are used for address formation */
2286bc17de7cSEric Farman 	if (!(gpa & ~0x3ff))
2287bc17de7cSEric Farman 		return 0;
2288bc17de7cSEric Farman 
2289bc17de7cSEric Farman 	return write_guest_abs(vcpu, gpa & ~0x3ff,
2290bc17de7cSEric Farman 			       (void *)&vcpu->run->s.regs.vrs, 512);
2291bc17de7cSEric Farman }
2292bc17de7cSEric Farman 
2293bc17de7cSEric Farman int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2294bc17de7cSEric Farman {
2295bc17de7cSEric Farman 	if (!test_kvm_facility(vcpu->kvm, 129))
2296bc17de7cSEric Farman 		return 0;
2297bc17de7cSEric Farman 
2298bc17de7cSEric Farman 	/*
2299bc17de7cSEric Farman 	 * The guest VXRS are in the host VXRS due to the lazy
2300bc17de7cSEric Farman 	 * copying in vcpu load/put. Let's update our copies before we save
2301bc17de7cSEric Farman 	 * them into the save area.
2302bc17de7cSEric Farman 	 */
2303bc17de7cSEric Farman 	save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
2304bc17de7cSEric Farman 
2305bc17de7cSEric Farman 	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2306bc17de7cSEric Farman }
2307bc17de7cSEric Farman 
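/*
 * The following helpers toggle the IBS facility, a performance assist that
 * is only used while a single VCPU is running.  The ENABLE/DISABLE requests
 * are picked up by the target VCPU before it next enters the guest.
 */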
23088ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
23098ad35755SDavid Hildenbrand {
23108ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
23118e236546SChristian Borntraeger 	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
23128ad35755SDavid Hildenbrand }
23138ad35755SDavid Hildenbrand 
23148ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
23158ad35755SDavid Hildenbrand {
23168ad35755SDavid Hildenbrand 	unsigned int i;
23178ad35755SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
23188ad35755SDavid Hildenbrand 
23198ad35755SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
23208ad35755SDavid Hildenbrand 		__disable_ibs_on_vcpu(vcpu);
23218ad35755SDavid Hildenbrand 	}
23228ad35755SDavid Hildenbrand }
23238ad35755SDavid Hildenbrand 
23248ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
23258ad35755SDavid Hildenbrand {
23268ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
23278e236546SChristian Borntraeger 	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
23288ad35755SDavid Hildenbrand }
23298ad35755SDavid Hildenbrand 
23306852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
23316852d7b6SDavid Hildenbrand {
23328ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
23338ad35755SDavid Hildenbrand 
23348ad35755SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
23358ad35755SDavid Hildenbrand 		return;
23368ad35755SDavid Hildenbrand 
23376852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
23388ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
2339433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
23408ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
23418ad35755SDavid Hildenbrand 
23428ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
23438ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
23448ad35755SDavid Hildenbrand 			started_vcpus++;
23458ad35755SDavid Hildenbrand 	}
23468ad35755SDavid Hildenbrand 
23478ad35755SDavid Hildenbrand 	if (started_vcpus == 0) {
23488ad35755SDavid Hildenbrand 		/* we're the only active VCPU -> speed it up */
23498ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(vcpu);
23508ad35755SDavid Hildenbrand 	} else if (started_vcpus == 1) {
23518ad35755SDavid Hildenbrand 		/*
23528ad35755SDavid Hildenbrand 		 * As we are starting a second VCPU, we have to disable
23538ad35755SDavid Hildenbrand 		 * the IBS facility on all VCPUs to remove potentially
23548ad35755SDavid Hildenbrand 		 * outstanding ENABLE requests.
23558ad35755SDavid Hildenbrand 		 */
23568ad35755SDavid Hildenbrand 		__disable_ibs_on_all_vcpus(vcpu->kvm);
23578ad35755SDavid Hildenbrand 	}
23588ad35755SDavid Hildenbrand 
23596852d7b6SDavid Hildenbrand 	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
23608ad35755SDavid Hildenbrand 	/*
23618ad35755SDavid Hildenbrand 	 * Another VCPU might have used IBS while we were offline.
23628ad35755SDavid Hildenbrand 	 * Let's play safe and flush the VCPU at startup.
23638ad35755SDavid Hildenbrand 	 */
2364d3d692c8SDavid Hildenbrand 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2365433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
23668ad35755SDavid Hildenbrand 	return;
23676852d7b6SDavid Hildenbrand }
23686852d7b6SDavid Hildenbrand 
23696852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
23706852d7b6SDavid Hildenbrand {
23718ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
23728ad35755SDavid Hildenbrand 	struct kvm_vcpu *started_vcpu = NULL;
23738ad35755SDavid Hildenbrand 
23748ad35755SDavid Hildenbrand 	if (is_vcpu_stopped(vcpu))
23758ad35755SDavid Hildenbrand 		return;
23768ad35755SDavid Hildenbrand 
23776852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
23788ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
2379433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
23808ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
23818ad35755SDavid Hildenbrand 
238232f5ff63SDavid Hildenbrand 	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
23836cddd432SDavid Hildenbrand 	kvm_s390_clear_stop_irq(vcpu);
238432f5ff63SDavid Hildenbrand 
23856cddd432SDavid Hildenbrand 	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
23868ad35755SDavid Hildenbrand 	__disable_ibs_on_vcpu(vcpu);
23878ad35755SDavid Hildenbrand 
23888ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
23898ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
23908ad35755SDavid Hildenbrand 			started_vcpus++;
23918ad35755SDavid Hildenbrand 			started_vcpu = vcpu->kvm->vcpus[i];
23928ad35755SDavid Hildenbrand 		}
23938ad35755SDavid Hildenbrand 	}
23948ad35755SDavid Hildenbrand 
23958ad35755SDavid Hildenbrand 	if (started_vcpus == 1) {
23968ad35755SDavid Hildenbrand 		/*
23978ad35755SDavid Hildenbrand 		 * As we only have one VCPU left, we want to enable the
23988ad35755SDavid Hildenbrand 		 * IBS facility for that VCPU to speed it up.
23998ad35755SDavid Hildenbrand 		 */
24008ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(started_vcpu);
24018ad35755SDavid Hildenbrand 	}
24028ad35755SDavid Hildenbrand 
2403433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
24048ad35755SDavid Hildenbrand 	return;
24056852d7b6SDavid Hildenbrand }
24066852d7b6SDavid Hildenbrand 
2407d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2408d6712df9SCornelia Huck 				     struct kvm_enable_cap *cap)
2409d6712df9SCornelia Huck {
2410d6712df9SCornelia Huck 	int r;
2411d6712df9SCornelia Huck 
2412d6712df9SCornelia Huck 	if (cap->flags)
2413d6712df9SCornelia Huck 		return -EINVAL;
2414d6712df9SCornelia Huck 
2415d6712df9SCornelia Huck 	switch (cap->cap) {
2416fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
2417fa6b7fe9SCornelia Huck 		if (!vcpu->kvm->arch.css_support) {
2418fa6b7fe9SCornelia Huck 			vcpu->kvm->arch.css_support = 1;
2419c92ea7b9SChristian Borntraeger 			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
2420fa6b7fe9SCornelia Huck 			trace_kvm_s390_enable_css(vcpu->kvm);
2421fa6b7fe9SCornelia Huck 		}
2422fa6b7fe9SCornelia Huck 		r = 0;
2423fa6b7fe9SCornelia Huck 		break;
2424d6712df9SCornelia Huck 	default:
2425d6712df9SCornelia Huck 		r = -EINVAL;
2426d6712df9SCornelia Huck 		break;
2427d6712df9SCornelia Huck 	}
2428d6712df9SCornelia Huck 	return r;
2429d6712df9SCornelia Huck }
2430d6712df9SCornelia Huck 
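/*
 * Handler for the KVM_S390_MEM_OP vcpu ioctl: reads or writes guest logical
 * memory through a bounce buffer, or, with KVM_S390_MEMOP_F_CHECK_ONLY, only
 * checks whether the access would succeed.  With
 * KVM_S390_MEMOP_F_INJECT_EXCEPTION, access exceptions are injected into the
 * guest instead of only being reported to userspace.
 */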
243141408c28SThomas Huth static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
243241408c28SThomas Huth 				  struct kvm_s390_mem_op *mop)
243341408c28SThomas Huth {
243441408c28SThomas Huth 	void __user *uaddr = (void __user *)mop->buf;
243541408c28SThomas Huth 	void *tmpbuf = NULL;
243641408c28SThomas Huth 	int r, srcu_idx;
243741408c28SThomas Huth 	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
243841408c28SThomas Huth 				    | KVM_S390_MEMOP_F_CHECK_ONLY;
243941408c28SThomas Huth 
244041408c28SThomas Huth 	if (mop->flags & ~supported_flags)
244141408c28SThomas Huth 		return -EINVAL;
244241408c28SThomas Huth 
244341408c28SThomas Huth 	if (mop->size > MEM_OP_MAX_SIZE)
244441408c28SThomas Huth 		return -E2BIG;
244541408c28SThomas Huth 
244641408c28SThomas Huth 	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
244741408c28SThomas Huth 		tmpbuf = vmalloc(mop->size);
244841408c28SThomas Huth 		if (!tmpbuf)
244941408c28SThomas Huth 			return -ENOMEM;
245041408c28SThomas Huth 	}
245141408c28SThomas Huth 
245241408c28SThomas Huth 	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
245341408c28SThomas Huth 
245441408c28SThomas Huth 	switch (mop->op) {
245541408c28SThomas Huth 	case KVM_S390_MEMOP_LOGICAL_READ:
245641408c28SThomas Huth 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
245741408c28SThomas Huth 			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
245841408c28SThomas Huth 			break;
245941408c28SThomas Huth 		}
246041408c28SThomas Huth 		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
246141408c28SThomas Huth 		if (r == 0) {
246241408c28SThomas Huth 			if (copy_to_user(uaddr, tmpbuf, mop->size))
246341408c28SThomas Huth 				r = -EFAULT;
246441408c28SThomas Huth 		}
246541408c28SThomas Huth 		break;
246641408c28SThomas Huth 	case KVM_S390_MEMOP_LOGICAL_WRITE:
246741408c28SThomas Huth 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
246841408c28SThomas Huth 			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
246941408c28SThomas Huth 			break;
247041408c28SThomas Huth 		}
247141408c28SThomas Huth 		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
247241408c28SThomas Huth 			r = -EFAULT;
247341408c28SThomas Huth 			break;
247441408c28SThomas Huth 		}
247541408c28SThomas Huth 		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
247641408c28SThomas Huth 		break;
247741408c28SThomas Huth 	default:
247841408c28SThomas Huth 		r = -EINVAL;
247941408c28SThomas Huth 	}
248041408c28SThomas Huth 
248141408c28SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
248241408c28SThomas Huth 
248341408c28SThomas Huth 	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
248441408c28SThomas Huth 		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
248541408c28SThomas Huth 
248641408c28SThomas Huth 	vfree(tmpbuf);
248741408c28SThomas Huth 	return r;
248841408c28SThomas Huth }
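
/*
 * Illustrative sketch (not part of this file): how a userspace VMM might
 * drive the KVM_S390_MEM_OP ioctl handled above to read guest memory.  The
 * field names follow struct kvm_s390_mem_op as used in this handler;
 * vcpu_fd, guest_addr, len, local_buf and handle_error() are assumptions
 * supplied by the caller.
 *
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = guest_addr,
 *		.size  = len,
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)(unsigned long)local_buf,
 *		.ar    = 0,
 *	};
 *	if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &op) < 0)
 *		handle_error();
 */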
248941408c28SThomas Huth 
2490b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp,
2491b0c632dbSHeiko Carstens 			 unsigned int ioctl, unsigned long arg)
2492b0c632dbSHeiko Carstens {
2493b0c632dbSHeiko Carstens 	struct kvm_vcpu *vcpu = filp->private_data;
2494b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
2495800c1065SThomas Huth 	int idx;
2496bc923cc9SAvi Kivity 	long r;
2497b0c632dbSHeiko Carstens 
249893736624SAvi Kivity 	switch (ioctl) {
249947b43c52SJens Freimann 	case KVM_S390_IRQ: {
250047b43c52SJens Freimann 		struct kvm_s390_irq s390irq;
250147b43c52SJens Freimann 
250247b43c52SJens Freimann 		r = -EFAULT;
250347b43c52SJens Freimann 		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
250447b43c52SJens Freimann 			break;
250547b43c52SJens Freimann 		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
250647b43c52SJens Freimann 		break;
250747b43c52SJens Freimann 	}
250893736624SAvi Kivity 	case KVM_S390_INTERRUPT: {
2509ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
2510383d0b05SJens Freimann 		struct kvm_s390_irq s390irq;
2511ba5c1e9bSCarsten Otte 
251293736624SAvi Kivity 		r = -EFAULT;
2513ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
251493736624SAvi Kivity 			break;
2515383d0b05SJens Freimann 		if (s390int_to_s390irq(&s390int, &s390irq))
2516383d0b05SJens Freimann 			return -EINVAL;
2517383d0b05SJens Freimann 		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
251893736624SAvi Kivity 		break;
2519ba5c1e9bSCarsten Otte 	}
2520b0c632dbSHeiko Carstens 	case KVM_S390_STORE_STATUS:
2521800c1065SThomas Huth 		idx = srcu_read_lock(&vcpu->kvm->srcu);
2522bc923cc9SAvi Kivity 		r = kvm_s390_vcpu_store_status(vcpu, arg);
2523800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
2524bc923cc9SAvi Kivity 		break;
2525b0c632dbSHeiko Carstens 	case KVM_S390_SET_INITIAL_PSW: {
2526b0c632dbSHeiko Carstens 		psw_t psw;
2527b0c632dbSHeiko Carstens 
2528bc923cc9SAvi Kivity 		r = -EFAULT;
2529b0c632dbSHeiko Carstens 		if (copy_from_user(&psw, argp, sizeof(psw)))
2530bc923cc9SAvi Kivity 			break;
2531bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2532bc923cc9SAvi Kivity 		break;
2533b0c632dbSHeiko Carstens 	}
2534b0c632dbSHeiko Carstens 	case KVM_S390_INITIAL_RESET:
2535bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2536bc923cc9SAvi Kivity 		break;
253714eebd91SCarsten Otte 	case KVM_SET_ONE_REG:
253814eebd91SCarsten Otte 	case KVM_GET_ONE_REG: {
253914eebd91SCarsten Otte 		struct kvm_one_reg reg;
254014eebd91SCarsten Otte 		r = -EFAULT;
254114eebd91SCarsten Otte 		if (copy_from_user(&reg, argp, sizeof(reg)))
254214eebd91SCarsten Otte 			break;
254314eebd91SCarsten Otte 		if (ioctl == KVM_SET_ONE_REG)
254414eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
254514eebd91SCarsten Otte 		else
254614eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
254714eebd91SCarsten Otte 		break;
254814eebd91SCarsten Otte 	}
254927e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
255027e0393fSCarsten Otte 	case KVM_S390_UCAS_MAP: {
255127e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
255227e0393fSCarsten Otte 
255327e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
255427e0393fSCarsten Otte 			r = -EFAULT;
255527e0393fSCarsten Otte 			break;
255627e0393fSCarsten Otte 		}
255727e0393fSCarsten Otte 
255827e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
255927e0393fSCarsten Otte 			r = -EINVAL;
256027e0393fSCarsten Otte 			break;
256127e0393fSCarsten Otte 		}
256227e0393fSCarsten Otte 
256327e0393fSCarsten Otte 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
256427e0393fSCarsten Otte 				     ucasmap.vcpu_addr, ucasmap.length);
256527e0393fSCarsten Otte 		break;
256627e0393fSCarsten Otte 	}
256727e0393fSCarsten Otte 	case KVM_S390_UCAS_UNMAP: {
256827e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
256927e0393fSCarsten Otte 
257027e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
257127e0393fSCarsten Otte 			r = -EFAULT;
257227e0393fSCarsten Otte 			break;
257327e0393fSCarsten Otte 		}
257427e0393fSCarsten Otte 
257527e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
257627e0393fSCarsten Otte 			r = -EINVAL;
257727e0393fSCarsten Otte 			break;
257827e0393fSCarsten Otte 		}
257927e0393fSCarsten Otte 
258027e0393fSCarsten Otte 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
258127e0393fSCarsten Otte 			ucasmap.length);
258227e0393fSCarsten Otte 		break;
258327e0393fSCarsten Otte 	}
258427e0393fSCarsten Otte #endif
2585ccc7910fSCarsten Otte 	case KVM_S390_VCPU_FAULT: {
2586527e30b4SMartin Schwidefsky 		r = gmap_fault(vcpu->arch.gmap, arg, 0);
2587ccc7910fSCarsten Otte 		break;
2588ccc7910fSCarsten Otte 	}
2589d6712df9SCornelia Huck 	case KVM_ENABLE_CAP:
2590d6712df9SCornelia Huck 	{
2591d6712df9SCornelia Huck 		struct kvm_enable_cap cap;
2592d6712df9SCornelia Huck 		r = -EFAULT;
2593d6712df9SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
2594d6712df9SCornelia Huck 			break;
2595d6712df9SCornelia Huck 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2596d6712df9SCornelia Huck 		break;
2597d6712df9SCornelia Huck 	}
259841408c28SThomas Huth 	case KVM_S390_MEM_OP: {
259941408c28SThomas Huth 		struct kvm_s390_mem_op mem_op;
260041408c28SThomas Huth 
260141408c28SThomas Huth 		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
260241408c28SThomas Huth 			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
260341408c28SThomas Huth 		else
260441408c28SThomas Huth 			r = -EFAULT;
260541408c28SThomas Huth 		break;
260641408c28SThomas Huth 	}
2607816c7667SJens Freimann 	case KVM_S390_SET_IRQ_STATE: {
2608816c7667SJens Freimann 		struct kvm_s390_irq_state irq_state;
2609816c7667SJens Freimann 
2610816c7667SJens Freimann 		r = -EFAULT;
2611816c7667SJens Freimann 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2612816c7667SJens Freimann 			break;
2613816c7667SJens Freimann 		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
2614816c7667SJens Freimann 		    irq_state.len == 0 ||
2615816c7667SJens Freimann 		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
2616816c7667SJens Freimann 			r = -EINVAL;
2617816c7667SJens Freimann 			break;
2618816c7667SJens Freimann 		}
2619816c7667SJens Freimann 		r = kvm_s390_set_irq_state(vcpu,
2620816c7667SJens Freimann 					   (void __user *) irq_state.buf,
2621816c7667SJens Freimann 					   irq_state.len);
2622816c7667SJens Freimann 		break;
2623816c7667SJens Freimann 	}
2624816c7667SJens Freimann 	case KVM_S390_GET_IRQ_STATE: {
2625816c7667SJens Freimann 		struct kvm_s390_irq_state irq_state;
2626816c7667SJens Freimann 
2627816c7667SJens Freimann 		r = -EFAULT;
2628816c7667SJens Freimann 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2629816c7667SJens Freimann 			break;
2630816c7667SJens Freimann 		if (irq_state.len == 0) {
2631816c7667SJens Freimann 			r = -EINVAL;
2632816c7667SJens Freimann 			break;
2633816c7667SJens Freimann 		}
2634816c7667SJens Freimann 		r = kvm_s390_get_irq_state(vcpu,
2635816c7667SJens Freimann 					   (__u8 __user *)  irq_state.buf,
2636816c7667SJens Freimann 					   irq_state.len);
2637816c7667SJens Freimann 		break;
2638816c7667SJens Freimann 	}
2639b0c632dbSHeiko Carstens 	default:
26403e6afcf1SCarsten Otte 		r = -ENOTTY;
2641b0c632dbSHeiko Carstens 	}
2642bc923cc9SAvi Kivity 	return r;
2643b0c632dbSHeiko Carstens }
2644b0c632dbSHeiko Carstens 
26455b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
26465b1c1493SCarsten Otte {
26475b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
26485b1c1493SCarsten Otte 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
26495b1c1493SCarsten Otte 		 && (kvm_is_ucontrol(vcpu->kvm))) {
26505b1c1493SCarsten Otte 		vmf->page = virt_to_page(vcpu->arch.sie_block);
26515b1c1493SCarsten Otte 		get_page(vmf->page);
26525b1c1493SCarsten Otte 		return 0;
26535b1c1493SCarsten Otte 	}
26545b1c1493SCarsten Otte #endif
26555b1c1493SCarsten Otte 	return VM_FAULT_SIGBUS;
26565b1c1493SCarsten Otte }
26575b1c1493SCarsten Otte 
26585587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
26595587027cSAneesh Kumar K.V 			    unsigned long npages)
2660db3fe4ebSTakuya Yoshikawa {
2661db3fe4ebSTakuya Yoshikawa 	return 0;
2662db3fe4ebSTakuya Yoshikawa }
2663db3fe4ebSTakuya Yoshikawa 
2664b0c632dbSHeiko Carstens /* Section: memory related */
2665f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
2666f7784b8eSMarcelo Tosatti 				   struct kvm_memory_slot *memslot,
266709170a49SPaolo Bonzini 				   const struct kvm_userspace_memory_region *mem,
26687b6195a9STakuya Yoshikawa 				   enum kvm_mr_change change)
2669b0c632dbSHeiko Carstens {
2670dd2887e7SNick Wang 	/* A few sanity checks. Memory slots have to start and end at a
2671dd2887e7SNick Wang 	   segment boundary (1MB). The memory in userland may be fragmented
2672dd2887e7SNick Wang 	   into various different vmas, and it is okay to mmap() and munmap()
2673dd2887e7SNick Wang 	   memory in this slot at any time after doing this call */
2674b0c632dbSHeiko Carstens 
2675598841caSCarsten Otte 	if (mem->userspace_addr & 0xffffful)
2676b0c632dbSHeiko Carstens 		return -EINVAL;
2677b0c632dbSHeiko Carstens 
2678598841caSCarsten Otte 	if (mem->memory_size & 0xffffful)
2679b0c632dbSHeiko Carstens 		return -EINVAL;
2680b0c632dbSHeiko Carstens 
2681f7784b8eSMarcelo Tosatti 	return 0;
2682f7784b8eSMarcelo Tosatti }
2683f7784b8eSMarcelo Tosatti 
2684f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
268509170a49SPaolo Bonzini 				const struct kvm_userspace_memory_region *mem,
26868482644aSTakuya Yoshikawa 				const struct kvm_memory_slot *old,
2687f36f3f28SPaolo Bonzini 				const struct kvm_memory_slot *new,
26888482644aSTakuya Yoshikawa 				enum kvm_mr_change change)
2689f7784b8eSMarcelo Tosatti {
2690f7850c92SCarsten Otte 	int rc;
2691f7784b8eSMarcelo Tosatti 
26922cef4debSChristian Borntraeger 	/* If the basics of the memslot do not change, we do not want
26932cef4debSChristian Borntraeger 	 * to update the gmap. Every update causes several unnecessary
26942cef4debSChristian Borntraeger 	 * segment translation exceptions. This is usually handled just
26952cef4debSChristian Borntraeger 	 * fine by the normal fault handler + gmap, but it will also
26962cef4debSChristian Borntraeger 	 * cause faults on the prefix page of running guest CPUs.
26972cef4debSChristian Borntraeger 	 */
26982cef4debSChristian Borntraeger 	if (old->userspace_addr == mem->userspace_addr &&
26992cef4debSChristian Borntraeger 	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
27002cef4debSChristian Borntraeger 	    old->npages * PAGE_SIZE == mem->memory_size)
27012cef4debSChristian Borntraeger 		return;
2702598841caSCarsten Otte 
2703598841caSCarsten Otte 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2704598841caSCarsten Otte 		mem->guest_phys_addr, mem->memory_size);
2705598841caSCarsten Otte 	if (rc)
2706ea2cdd27SDavid Hildenbrand 		pr_warn("failed to commit memory region\n");
2707598841caSCarsten Otte 	return;
2708b0c632dbSHeiko Carstens }
2709b0c632dbSHeiko Carstens 
2710b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
2711b0c632dbSHeiko Carstens {
27129d8d5786SMichael Mueller 	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
2713b0c632dbSHeiko Carstens }
2714b0c632dbSHeiko Carstens 
2715b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void)
2716b0c632dbSHeiko Carstens {
2717b0c632dbSHeiko Carstens 	kvm_exit();
2718b0c632dbSHeiko Carstens }
2719b0c632dbSHeiko Carstens 
2720b0c632dbSHeiko Carstens module_init(kvm_s390_init);
2721b0c632dbSHeiko Carstens module_exit(kvm_s390_exit);
2722566af940SCornelia Huck 
2723566af940SCornelia Huck /*
2724566af940SCornelia Huck  * Enable autoloading of the kvm module.
2725566af940SCornelia Huck  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2726566af940SCornelia Huck  * since x86 takes a different approach.
2727566af940SCornelia Huck  */
2728566af940SCornelia Huck #include <linux/miscdevice.h>
2729566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR);
2730566af940SCornelia Huck MODULE_ALIAS("devname:kvm");
2731