xref: /openbmc/linux/arch/s390/kvm/kvm-s390.c (revision 32f5ff63ff9c87195d06896e6ab4086b6369546a)
1b0c632dbSHeiko Carstens /*
2a53c8fabSHeiko Carstens  * hosting zSeries kernel virtual machines
3b0c632dbSHeiko Carstens  *
4628eb9b8SChristian Ehrhardt  * Copyright IBM Corp. 2008, 2009
5b0c632dbSHeiko Carstens  *
6b0c632dbSHeiko Carstens  * This program is free software; you can redistribute it and/or modify
7b0c632dbSHeiko Carstens  * it under the terms of the GNU General Public License (version 2 only)
8b0c632dbSHeiko Carstens  * as published by the Free Software Foundation.
9b0c632dbSHeiko Carstens  *
10b0c632dbSHeiko Carstens  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11b0c632dbSHeiko Carstens  *               Christian Borntraeger <borntraeger@de.ibm.com>
12b0c632dbSHeiko Carstens  *               Heiko Carstens <heiko.carstens@de.ibm.com>
13628eb9b8SChristian Ehrhardt  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
1415f36ebdSJason J. Herne  *               Jason J. Herne <jjherne@us.ibm.com>
15b0c632dbSHeiko Carstens  */
16b0c632dbSHeiko Carstens 
17b0c632dbSHeiko Carstens #include <linux/compiler.h>
18b0c632dbSHeiko Carstens #include <linux/err.h>
19b0c632dbSHeiko Carstens #include <linux/fs.h>
20ca872302SChristian Borntraeger #include <linux/hrtimer.h>
21b0c632dbSHeiko Carstens #include <linux/init.h>
22b0c632dbSHeiko Carstens #include <linux/kvm.h>
23b0c632dbSHeiko Carstens #include <linux/kvm_host.h>
24b0c632dbSHeiko Carstens #include <linux/module.h>
25b0c632dbSHeiko Carstens #include <linux/slab.h>
26ba5c1e9bSCarsten Otte #include <linux/timer.h>
27cbb870c8SHeiko Carstens #include <asm/asm-offsets.h>
28b0c632dbSHeiko Carstens #include <asm/lowcore.h>
29b0c632dbSHeiko Carstens #include <asm/pgtable.h>
30f5daba1dSHeiko Carstens #include <asm/nmi.h>
31a0616cdeSDavid Howells #include <asm/switch_to.h>
3278c4b59fSMichael Mueller #include <asm/facility.h>
331526bf9cSChristian Borntraeger #include <asm/sclp.h>
348f2abe6aSChristian Borntraeger #include "kvm-s390.h"
35b0c632dbSHeiko Carstens #include "gaccess.h"
36b0c632dbSHeiko Carstens 
375786fffaSCornelia Huck #define CREATE_TRACE_POINTS
385786fffaSCornelia Huck #include "trace.h"
39ade38c31SCornelia Huck #include "trace-s390.h"
405786fffaSCornelia Huck 
41b0c632dbSHeiko Carstens #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
42b0c632dbSHeiko Carstens 
43b0c632dbSHeiko Carstens struct kvm_stats_debugfs_item debugfs_entries[] = {
44b0c632dbSHeiko Carstens 	{ "userspace_handled", VCPU_STAT(exit_userspace) },
450eaeafa1SChristian Borntraeger 	{ "exit_null", VCPU_STAT(exit_null) },
468f2abe6aSChristian Borntraeger 	{ "exit_validity", VCPU_STAT(exit_validity) },
478f2abe6aSChristian Borntraeger 	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
488f2abe6aSChristian Borntraeger 	{ "exit_external_request", VCPU_STAT(exit_external_request) },
498f2abe6aSChristian Borntraeger 	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
50ba5c1e9bSCarsten Otte 	{ "exit_instruction", VCPU_STAT(exit_instruction) },
51ba5c1e9bSCarsten Otte 	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
52ba5c1e9bSCarsten Otte 	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
53f5e10b09SChristian Borntraeger 	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
54ba5c1e9bSCarsten Otte 	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
55aba07508SDavid Hildenbrand 	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
56aba07508SDavid Hildenbrand 	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
57ba5c1e9bSCarsten Otte 	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
587697e71fSChristian Ehrhardt 	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
59ba5c1e9bSCarsten Otte 	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
60ba5c1e9bSCarsten Otte 	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
61ba5c1e9bSCarsten Otte 	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
62ba5c1e9bSCarsten Otte 	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
63ba5c1e9bSCarsten Otte 	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
64ba5c1e9bSCarsten Otte 	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
65ba5c1e9bSCarsten Otte 	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
6669d0d3a3SChristian Borntraeger 	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
67453423dcSChristian Borntraeger 	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
68453423dcSChristian Borntraeger 	{ "instruction_spx", VCPU_STAT(instruction_spx) },
69453423dcSChristian Borntraeger 	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
70453423dcSChristian Borntraeger 	{ "instruction_stap", VCPU_STAT(instruction_stap) },
71453423dcSChristian Borntraeger 	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
728a242234SHeiko Carstens 	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
73453423dcSChristian Borntraeger 	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
74453423dcSChristian Borntraeger 	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
75b31288faSKonstantin Weitz 	{ "instruction_essa", VCPU_STAT(instruction_essa) },
76453423dcSChristian Borntraeger 	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
77453423dcSChristian Borntraeger 	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
78bb25b9baSChristian Borntraeger 	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
795288fbf0SChristian Borntraeger 	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
80bd59d3a4SCornelia Huck 	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
817697e71fSChristian Ehrhardt 	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
825288fbf0SChristian Borntraeger 	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
835288fbf0SChristian Borntraeger 	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
845288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
855288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
865288fbf0SChristian Borntraeger 	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
87388186bcSChristian Borntraeger 	{ "diagnose_10", VCPU_STAT(diagnose_10) },
88e28acfeaSChristian Borntraeger 	{ "diagnose_44", VCPU_STAT(diagnose_44) },
8941628d33SKonstantin Weitz 	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
90b0c632dbSHeiko Carstens 	{ NULL }
91b0c632dbSHeiko Carstens };
92b0c632dbSHeiko Carstens 
9378c4b59fSMichael Mueller unsigned long *vfacilities;
942c70fe44SChristian Borntraeger static struct gmap_notifier gmap_notifier;
95b0c632dbSHeiko Carstens 
9678c4b59fSMichael Mueller /* test availability of vfacility */
97280ef0f1SHeiko Carstens int test_vfacility(unsigned long nr)
9878c4b59fSMichael Mueller {
9978c4b59fSMichael Mueller 	return __test_facility(nr, (void *) vfacilities);
10078c4b59fSMichael Mueller }
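/*
 * Usage note (illustrative, not part of the original file): callers gate
 * optional guest features on the host facility list copied into
 * vfacilities, e.g. kvm_arch_vcpu_setup() later in this file does
 *
 *	if (test_vfacility(50) && test_vfacility(73))
 *		vcpu->arch.sie_block->ecb |= 0x10;
 *
 * so a facility bit is only exposed to the guest when the host reports
 * the corresponding facilities as installed.
 */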
10178c4b59fSMichael Mueller 
102b0c632dbSHeiko Carstens /* Section: not file related */
10310474ae8SAlexander Graf int kvm_arch_hardware_enable(void *garbage)
104b0c632dbSHeiko Carstens {
105b0c632dbSHeiko Carstens 	/* every s390 is virtualization enabled ;-) */
10610474ae8SAlexander Graf 	return 0;
107b0c632dbSHeiko Carstens }
108b0c632dbSHeiko Carstens 
109b0c632dbSHeiko Carstens void kvm_arch_hardware_disable(void *garbage)
110b0c632dbSHeiko Carstens {
111b0c632dbSHeiko Carstens }
112b0c632dbSHeiko Carstens 
1132c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
1142c70fe44SChristian Borntraeger 
115b0c632dbSHeiko Carstens int kvm_arch_hardware_setup(void)
116b0c632dbSHeiko Carstens {
1172c70fe44SChristian Borntraeger 	gmap_notifier.notifier_call = kvm_gmap_notifier;
1182c70fe44SChristian Borntraeger 	gmap_register_ipte_notifier(&gmap_notifier);
119b0c632dbSHeiko Carstens 	return 0;
120b0c632dbSHeiko Carstens }
121b0c632dbSHeiko Carstens 
122b0c632dbSHeiko Carstens void kvm_arch_hardware_unsetup(void)
123b0c632dbSHeiko Carstens {
1242c70fe44SChristian Borntraeger 	gmap_unregister_ipte_notifier(&gmap_notifier);
125b0c632dbSHeiko Carstens }
126b0c632dbSHeiko Carstens 
127b0c632dbSHeiko Carstens void kvm_arch_check_processor_compat(void *rtn)
128b0c632dbSHeiko Carstens {
129b0c632dbSHeiko Carstens }
130b0c632dbSHeiko Carstens 
131b0c632dbSHeiko Carstens int kvm_arch_init(void *opaque)
132b0c632dbSHeiko Carstens {
133b0c632dbSHeiko Carstens 	return 0;
134b0c632dbSHeiko Carstens }
135b0c632dbSHeiko Carstens 
136b0c632dbSHeiko Carstens void kvm_arch_exit(void)
137b0c632dbSHeiko Carstens {
138b0c632dbSHeiko Carstens }
139b0c632dbSHeiko Carstens 
140b0c632dbSHeiko Carstens /* Section: device related */
141b0c632dbSHeiko Carstens long kvm_arch_dev_ioctl(struct file *filp,
142b0c632dbSHeiko Carstens 			unsigned int ioctl, unsigned long arg)
143b0c632dbSHeiko Carstens {
144b0c632dbSHeiko Carstens 	if (ioctl == KVM_S390_ENABLE_SIE)
145b0c632dbSHeiko Carstens 		return s390_enable_sie();
146b0c632dbSHeiko Carstens 	return -EINVAL;
147b0c632dbSHeiko Carstens }
148b0c632dbSHeiko Carstens 
149b0c632dbSHeiko Carstens int kvm_dev_ioctl_check_extension(long ext)
150b0c632dbSHeiko Carstens {
151d7b0b5ebSCarsten Otte 	int r;
152d7b0b5ebSCarsten Otte 
1532bd0ac4eSCarsten Otte 	switch (ext) {
154d7b0b5ebSCarsten Otte 	case KVM_CAP_S390_PSW:
155b6cf8788SChristian Borntraeger 	case KVM_CAP_S390_GMAP:
15652e16b18SChristian Borntraeger 	case KVM_CAP_SYNC_MMU:
1571efd0f59SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
1581efd0f59SCarsten Otte 	case KVM_CAP_S390_UCONTROL:
1591efd0f59SCarsten Otte #endif
1603c038e6bSDominik Dingel 	case KVM_CAP_ASYNC_PF:
16160b413c9SChristian Borntraeger 	case KVM_CAP_SYNC_REGS:
16214eebd91SCarsten Otte 	case KVM_CAP_ONE_REG:
163d6712df9SCornelia Huck 	case KVM_CAP_ENABLE_CAP:
164fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
165ebc32262SCornelia Huck 	case KVM_CAP_IRQFD:
16610ccaa1eSCornelia Huck 	case KVM_CAP_IOEVENTFD:
167c05c4186SJens Freimann 	case KVM_CAP_DEVICE_CTRL:
168d938dc55SCornelia Huck 	case KVM_CAP_ENABLE_CAP_VM:
169f2061656SDominik Dingel 	case KVM_CAP_VM_ATTRIBUTES:
170d7b0b5ebSCarsten Otte 		r = 1;
171d7b0b5ebSCarsten Otte 		break;
172e726b1bdSChristian Borntraeger 	case KVM_CAP_NR_VCPUS:
173e726b1bdSChristian Borntraeger 	case KVM_CAP_MAX_VCPUS:
174e726b1bdSChristian Borntraeger 		r = KVM_MAX_VCPUS;
175e726b1bdSChristian Borntraeger 		break;
176e1e2e605SNick Wang 	case KVM_CAP_NR_MEMSLOTS:
177e1e2e605SNick Wang 		r = KVM_USER_MEM_SLOTS;
178e1e2e605SNick Wang 		break;
1791526bf9cSChristian Borntraeger 	case KVM_CAP_S390_COW:
180abf09bedSMartin Schwidefsky 		r = MACHINE_HAS_ESOP;
1811526bf9cSChristian Borntraeger 		break;
1822bd0ac4eSCarsten Otte 	default:
183d7b0b5ebSCarsten Otte 		r = 0;
184b0c632dbSHeiko Carstens 	}
185d7b0b5ebSCarsten Otte 	return r;
1862bd0ac4eSCarsten Otte }
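/*
 * Illustrative sketch (not part of the original file): userspace queries
 * these capabilities through the KVM_CHECK_EXTENSION ioctl on the
 * /dev/kvm file descriptor, roughly:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int max_vcpus = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
 *	int has_cow = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_COW);
 *
 * The returned values mirror the switch above (KVM_MAX_VCPUS and
 * MACHINE_HAS_ESOP respectively); error handling is omitted here.
 */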
187b0c632dbSHeiko Carstens 
18815f36ebdSJason J. Herne static void kvm_s390_sync_dirty_log(struct kvm *kvm,
18915f36ebdSJason J. Herne 					struct kvm_memory_slot *memslot)
19015f36ebdSJason J. Herne {
19115f36ebdSJason J. Herne 	gfn_t cur_gfn, last_gfn;
19215f36ebdSJason J. Herne 	unsigned long address;
19315f36ebdSJason J. Herne 	struct gmap *gmap = kvm->arch.gmap;
19415f36ebdSJason J. Herne 
19515f36ebdSJason J. Herne 	down_read(&gmap->mm->mmap_sem);
19615f36ebdSJason J. Herne 	/* Loop over all guest pages */
19715f36ebdSJason J. Herne 	last_gfn = memslot->base_gfn + memslot->npages;
19815f36ebdSJason J. Herne 	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
19915f36ebdSJason J. Herne 		address = gfn_to_hva_memslot(memslot, cur_gfn);
20015f36ebdSJason J. Herne 
20115f36ebdSJason J. Herne 		if (gmap_test_and_clear_dirty(address, gmap))
20215f36ebdSJason J. Herne 			mark_page_dirty(kvm, cur_gfn);
20315f36ebdSJason J. Herne 	}
20415f36ebdSJason J. Herne 	up_read(&gmap->mm->mmap_sem);
20515f36ebdSJason J. Herne }
20615f36ebdSJason J. Herne 
207b0c632dbSHeiko Carstens /* Section: vm related */
208b0c632dbSHeiko Carstens /*
209b0c632dbSHeiko Carstens  * Get (and clear) the dirty memory log for a memory slot.
210b0c632dbSHeiko Carstens  */
211b0c632dbSHeiko Carstens int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
212b0c632dbSHeiko Carstens 			       struct kvm_dirty_log *log)
213b0c632dbSHeiko Carstens {
21415f36ebdSJason J. Herne 	int r;
21515f36ebdSJason J. Herne 	unsigned long n;
21615f36ebdSJason J. Herne 	struct kvm_memory_slot *memslot;
21715f36ebdSJason J. Herne 	int is_dirty = 0;
21815f36ebdSJason J. Herne 
21915f36ebdSJason J. Herne 	mutex_lock(&kvm->slots_lock);
22015f36ebdSJason J. Herne 
22115f36ebdSJason J. Herne 	r = -EINVAL;
22215f36ebdSJason J. Herne 	if (log->slot >= KVM_USER_MEM_SLOTS)
22315f36ebdSJason J. Herne 		goto out;
22415f36ebdSJason J. Herne 
22515f36ebdSJason J. Herne 	memslot = id_to_memslot(kvm->memslots, log->slot);
22615f36ebdSJason J. Herne 	r = -ENOENT;
22715f36ebdSJason J. Herne 	if (!memslot->dirty_bitmap)
22815f36ebdSJason J. Herne 		goto out;
22915f36ebdSJason J. Herne 
23015f36ebdSJason J. Herne 	kvm_s390_sync_dirty_log(kvm, memslot);
23115f36ebdSJason J. Herne 	r = kvm_get_dirty_log(kvm, log, &is_dirty);
23215f36ebdSJason J. Herne 	if (r)
23315f36ebdSJason J. Herne 		goto out;
23415f36ebdSJason J. Herne 
23515f36ebdSJason J. Herne 	/* Clear the dirty log */
23615f36ebdSJason J. Herne 	if (is_dirty) {
23715f36ebdSJason J. Herne 		n = kvm_dirty_bitmap_bytes(memslot);
23815f36ebdSJason J. Herne 		memset(memslot->dirty_bitmap, 0, n);
23915f36ebdSJason J. Herne 	}
24015f36ebdSJason J. Herne 	r = 0;
24115f36ebdSJason J. Herne out:
24215f36ebdSJason J. Herne 	mutex_unlock(&kvm->slots_lock);
24315f36ebdSJason J. Herne 	return r;
244b0c632dbSHeiko Carstens }
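/*
 * Illustrative sketch (not part of the original file): a VMM retrieves
 * and clears the dirty bitmap of one memory slot with the
 * KVM_GET_DIRTY_LOG ioctl on the VM file descriptor, roughly:
 *
 *	struct kvm_dirty_log log = {
 *		.slot = slot_id,
 *		.dirty_bitmap = bitmap,
 *	};
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *
 * slot_id, bitmap (one bit per guest page) and vm_fd are caller-provided;
 * on s390 the handler above first syncs the gmap dirty state into the
 * bitmap via kvm_s390_sync_dirty_log().
 */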
245b0c632dbSHeiko Carstens 
246d938dc55SCornelia Huck static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
247d938dc55SCornelia Huck {
248d938dc55SCornelia Huck 	int r;
249d938dc55SCornelia Huck 
250d938dc55SCornelia Huck 	if (cap->flags)
251d938dc55SCornelia Huck 		return -EINVAL;
252d938dc55SCornelia Huck 
253d938dc55SCornelia Huck 	switch (cap->cap) {
25484223598SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
25584223598SCornelia Huck 		kvm->arch.use_irqchip = 1;
25684223598SCornelia Huck 		r = 0;
25784223598SCornelia Huck 		break;
258d938dc55SCornelia Huck 	default:
259d938dc55SCornelia Huck 		r = -EINVAL;
260d938dc55SCornelia Huck 		break;
261d938dc55SCornelia Huck 	}
262d938dc55SCornelia Huck 	return r;
263d938dc55SCornelia Huck }
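/*
 * Illustrative sketch (not part of the original file): with
 * KVM_CAP_ENABLE_CAP_VM advertised above, userspace turns on the
 * in-kernel irqchip support per VM like this:
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_IRQCHIP };
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 *
 * cap.flags must be zero, as checked by the handler above; vm_fd is the
 * caller's VM file descriptor.
 */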
264d938dc55SCornelia Huck 
2654f718eabSDominik Dingel static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
2664f718eabSDominik Dingel {
2674f718eabSDominik Dingel 	int ret;
2684f718eabSDominik Dingel 	unsigned int idx;
2694f718eabSDominik Dingel 	switch (attr->attr) {
2704f718eabSDominik Dingel 	case KVM_S390_VM_MEM_ENABLE_CMMA:
2714f718eabSDominik Dingel 		ret = -EBUSY;
2724f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
2734f718eabSDominik Dingel 		if (atomic_read(&kvm->online_vcpus) == 0) {
2744f718eabSDominik Dingel 			kvm->arch.use_cmma = 1;
2754f718eabSDominik Dingel 			ret = 0;
2764f718eabSDominik Dingel 		}
2774f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
2784f718eabSDominik Dingel 		break;
2794f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CLR_CMMA:
2804f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
2814f718eabSDominik Dingel 		idx = srcu_read_lock(&kvm->srcu);
2824f718eabSDominik Dingel 		page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
2834f718eabSDominik Dingel 		srcu_read_unlock(&kvm->srcu, idx);
2844f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
2854f718eabSDominik Dingel 		ret = 0;
2864f718eabSDominik Dingel 		break;
2874f718eabSDominik Dingel 	default:
2884f718eabSDominik Dingel 		ret = -ENXIO;
2894f718eabSDominik Dingel 		break;
2904f718eabSDominik Dingel 	}
2914f718eabSDominik Dingel 	return ret;
2924f718eabSDominik Dingel }
2934f718eabSDominik Dingel 
294f2061656SDominik Dingel static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
295f2061656SDominik Dingel {
296f2061656SDominik Dingel 	int ret;
297f2061656SDominik Dingel 
298f2061656SDominik Dingel 	switch (attr->group) {
2994f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
3004f718eabSDominik Dingel 		ret = kvm_s390_mem_control(kvm, attr);
3014f718eabSDominik Dingel 		break;
302f2061656SDominik Dingel 	default:
303f2061656SDominik Dingel 		ret = -ENXIO;
304f2061656SDominik Dingel 		break;
305f2061656SDominik Dingel 	}
306f2061656SDominik Dingel 
307f2061656SDominik Dingel 	return ret;
308f2061656SDominik Dingel }
309f2061656SDominik Dingel 
310f2061656SDominik Dingel static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
311f2061656SDominik Dingel {
312f2061656SDominik Dingel 	return -ENXIO;
313f2061656SDominik Dingel }
314f2061656SDominik Dingel 
315f2061656SDominik Dingel static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
316f2061656SDominik Dingel {
317f2061656SDominik Dingel 	int ret;
318f2061656SDominik Dingel 
319f2061656SDominik Dingel 	switch (attr->group) {
3204f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
3214f718eabSDominik Dingel 		switch (attr->attr) {
3224f718eabSDominik Dingel 		case KVM_S390_VM_MEM_ENABLE_CMMA:
3234f718eabSDominik Dingel 		case KVM_S390_VM_MEM_CLR_CMMA:
3244f718eabSDominik Dingel 			ret = 0;
3254f718eabSDominik Dingel 			break;
3264f718eabSDominik Dingel 		default:
3274f718eabSDominik Dingel 			ret = -ENXIO;
3284f718eabSDominik Dingel 			break;
3294f718eabSDominik Dingel 		}
3304f718eabSDominik Dingel 		break;
331f2061656SDominik Dingel 	default:
332f2061656SDominik Dingel 		ret = -ENXIO;
333f2061656SDominik Dingel 		break;
334f2061656SDominik Dingel 	}
335f2061656SDominik Dingel 
336f2061656SDominik Dingel 	return ret;
337f2061656SDominik Dingel }
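/*
 * Illustrative sketch (not part of the original file): the VM attribute
 * interface above is driven from userspace with KVM_HAS_DEVICE_ATTR and
 * KVM_SET_DEVICE_ATTR on the VM file descriptor, e.g. enabling CMMA
 * before any vcpu has been created:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MEM_CTRL,
 *		.attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
 *	};
 *	if (!ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr))
 *		ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * kvm_s390_mem_control() returns -EBUSY once vcpus are online.
 */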
338f2061656SDominik Dingel 
339b0c632dbSHeiko Carstens long kvm_arch_vm_ioctl(struct file *filp,
340b0c632dbSHeiko Carstens 		       unsigned int ioctl, unsigned long arg)
341b0c632dbSHeiko Carstens {
342b0c632dbSHeiko Carstens 	struct kvm *kvm = filp->private_data;
343b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
344f2061656SDominik Dingel 	struct kvm_device_attr attr;
345b0c632dbSHeiko Carstens 	int r;
346b0c632dbSHeiko Carstens 
347b0c632dbSHeiko Carstens 	switch (ioctl) {
348ba5c1e9bSCarsten Otte 	case KVM_S390_INTERRUPT: {
349ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
350ba5c1e9bSCarsten Otte 
351ba5c1e9bSCarsten Otte 		r = -EFAULT;
352ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
353ba5c1e9bSCarsten Otte 			break;
354ba5c1e9bSCarsten Otte 		r = kvm_s390_inject_vm(kvm, &s390int);
355ba5c1e9bSCarsten Otte 		break;
356ba5c1e9bSCarsten Otte 	}
357d938dc55SCornelia Huck 	case KVM_ENABLE_CAP: {
358d938dc55SCornelia Huck 		struct kvm_enable_cap cap;
359d938dc55SCornelia Huck 		r = -EFAULT;
360d938dc55SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
361d938dc55SCornelia Huck 			break;
362d938dc55SCornelia Huck 		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
363d938dc55SCornelia Huck 		break;
364d938dc55SCornelia Huck 	}
36584223598SCornelia Huck 	case KVM_CREATE_IRQCHIP: {
36684223598SCornelia Huck 		struct kvm_irq_routing_entry routing;
36784223598SCornelia Huck 
36884223598SCornelia Huck 		r = -EINVAL;
36984223598SCornelia Huck 		if (kvm->arch.use_irqchip) {
37084223598SCornelia Huck 			/* Set up dummy routing. */
37184223598SCornelia Huck 			memset(&routing, 0, sizeof(routing));
37284223598SCornelia Huck 			kvm_set_irq_routing(kvm, &routing, 0, 0);
37384223598SCornelia Huck 			r = 0;
37484223598SCornelia Huck 		}
37584223598SCornelia Huck 		break;
37684223598SCornelia Huck 	}
377f2061656SDominik Dingel 	case KVM_SET_DEVICE_ATTR: {
378f2061656SDominik Dingel 		r = -EFAULT;
379f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
380f2061656SDominik Dingel 			break;
381f2061656SDominik Dingel 		r = kvm_s390_vm_set_attr(kvm, &attr);
382f2061656SDominik Dingel 		break;
383f2061656SDominik Dingel 	}
384f2061656SDominik Dingel 	case KVM_GET_DEVICE_ATTR: {
385f2061656SDominik Dingel 		r = -EFAULT;
386f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
387f2061656SDominik Dingel 			break;
388f2061656SDominik Dingel 		r = kvm_s390_vm_get_attr(kvm, &attr);
389f2061656SDominik Dingel 		break;
390f2061656SDominik Dingel 	}
391f2061656SDominik Dingel 	case KVM_HAS_DEVICE_ATTR: {
392f2061656SDominik Dingel 		r = -EFAULT;
393f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
394f2061656SDominik Dingel 			break;
395f2061656SDominik Dingel 		r = kvm_s390_vm_has_attr(kvm, &attr);
396f2061656SDominik Dingel 		break;
397f2061656SDominik Dingel 	}
398b0c632dbSHeiko Carstens 	default:
399367e1319SAvi Kivity 		r = -ENOTTY;
400b0c632dbSHeiko Carstens 	}
401b0c632dbSHeiko Carstens 
402b0c632dbSHeiko Carstens 	return r;
403b0c632dbSHeiko Carstens }
404b0c632dbSHeiko Carstens 
405e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
406b0c632dbSHeiko Carstens {
407b0c632dbSHeiko Carstens 	int rc;
408b0c632dbSHeiko Carstens 	char debug_name[16];
409f6c137ffSChristian Borntraeger 	static unsigned long sca_offset;
410b0c632dbSHeiko Carstens 
411e08b9637SCarsten Otte 	rc = -EINVAL;
412e08b9637SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
413e08b9637SCarsten Otte 	if (type & ~KVM_VM_S390_UCONTROL)
414e08b9637SCarsten Otte 		goto out_err;
415e08b9637SCarsten Otte 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
416e08b9637SCarsten Otte 		goto out_err;
417e08b9637SCarsten Otte #else
418e08b9637SCarsten Otte 	if (type)
419e08b9637SCarsten Otte 		goto out_err;
420e08b9637SCarsten Otte #endif
421e08b9637SCarsten Otte 
422b0c632dbSHeiko Carstens 	rc = s390_enable_sie();
423b0c632dbSHeiko Carstens 	if (rc)
424d89f5effSJan Kiszka 		goto out_err;
425b0c632dbSHeiko Carstens 
426b290411aSCarsten Otte 	rc = -ENOMEM;
427b290411aSCarsten Otte 
428b0c632dbSHeiko Carstens 	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
429b0c632dbSHeiko Carstens 	if (!kvm->arch.sca)
430d89f5effSJan Kiszka 		goto out_err;
431f6c137ffSChristian Borntraeger 	spin_lock(&kvm_lock);
432f6c137ffSChristian Borntraeger 	sca_offset = (sca_offset + 16) & 0x7f0;
433f6c137ffSChristian Borntraeger 	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
434f6c137ffSChristian Borntraeger 	spin_unlock(&kvm_lock);
435b0c632dbSHeiko Carstens 
436b0c632dbSHeiko Carstens 	sprintf(debug_name, "kvm-%u", current->pid);
437b0c632dbSHeiko Carstens 
438b0c632dbSHeiko Carstens 	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
439b0c632dbSHeiko Carstens 	if (!kvm->arch.dbf)
440b0c632dbSHeiko Carstens 		goto out_nodbf;
441b0c632dbSHeiko Carstens 
442ba5c1e9bSCarsten Otte 	spin_lock_init(&kvm->arch.float_int.lock);
443ba5c1e9bSCarsten Otte 	INIT_LIST_HEAD(&kvm->arch.float_int.list);
4448a242234SHeiko Carstens 	init_waitqueue_head(&kvm->arch.ipte_wq);
445ba5c1e9bSCarsten Otte 
446b0c632dbSHeiko Carstens 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
447b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "%s", "vm created");
448b0c632dbSHeiko Carstens 
449e08b9637SCarsten Otte 	if (type & KVM_VM_S390_UCONTROL) {
450e08b9637SCarsten Otte 		kvm->arch.gmap = NULL;
451e08b9637SCarsten Otte 	} else {
452598841caSCarsten Otte 		kvm->arch.gmap = gmap_alloc(current->mm);
453598841caSCarsten Otte 		if (!kvm->arch.gmap)
454598841caSCarsten Otte 			goto out_nogmap;
4552c70fe44SChristian Borntraeger 		kvm->arch.gmap->private = kvm;
45624eb3a82SDominik Dingel 		kvm->arch.gmap->pfault_enabled = 0;
457e08b9637SCarsten Otte 	}
458fa6b7fe9SCornelia Huck 
459fa6b7fe9SCornelia Huck 	kvm->arch.css_support = 0;
46084223598SCornelia Huck 	kvm->arch.use_irqchip = 0;
461fa6b7fe9SCornelia Huck 
4628ad35755SDavid Hildenbrand 	spin_lock_init(&kvm->arch.start_stop_lock);
4638ad35755SDavid Hildenbrand 
464d89f5effSJan Kiszka 	return 0;
465598841caSCarsten Otte out_nogmap:
466598841caSCarsten Otte 	debug_unregister(kvm->arch.dbf);
467b0c632dbSHeiko Carstens out_nodbf:
468b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
469d89f5effSJan Kiszka out_err:
470d89f5effSJan Kiszka 	return rc;
471b0c632dbSHeiko Carstens }
472b0c632dbSHeiko Carstens 
473d329c035SChristian Borntraeger void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
474d329c035SChristian Borntraeger {
475d329c035SChristian Borntraeger 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
476ade38c31SCornelia Huck 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
47767335e63SChristian Borntraeger 	kvm_s390_clear_local_irqs(vcpu);
4783c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
47958f9460bSCarsten Otte 	if (!kvm_is_ucontrol(vcpu->kvm)) {
48058f9460bSCarsten Otte 		clear_bit(63 - vcpu->vcpu_id,
48158f9460bSCarsten Otte 			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
482abf4a71eSCarsten Otte 		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
483abf4a71eSCarsten Otte 		    (__u64) vcpu->arch.sie_block)
484abf4a71eSCarsten Otte 			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
48558f9460bSCarsten Otte 	}
486abf4a71eSCarsten Otte 	smp_mb();
48727e0393fSCarsten Otte 
48827e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm))
48927e0393fSCarsten Otte 		gmap_free(vcpu->arch.gmap);
49027e0393fSCarsten Otte 
491b31605c1SDominik Dingel 	if (kvm_s390_cmma_enabled(vcpu->kvm))
492b31605c1SDominik Dingel 		kvm_s390_vcpu_unsetup_cmma(vcpu);
493d329c035SChristian Borntraeger 	free_page((unsigned long)(vcpu->arch.sie_block));
494b31288faSKonstantin Weitz 
4956692cef3SChristian Borntraeger 	kvm_vcpu_uninit(vcpu);
496b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
497d329c035SChristian Borntraeger }
498d329c035SChristian Borntraeger 
499d329c035SChristian Borntraeger static void kvm_free_vcpus(struct kvm *kvm)
500d329c035SChristian Borntraeger {
501d329c035SChristian Borntraeger 	unsigned int i;
502988a2caeSGleb Natapov 	struct kvm_vcpu *vcpu;
503d329c035SChristian Borntraeger 
504988a2caeSGleb Natapov 	kvm_for_each_vcpu(i, vcpu, kvm)
505988a2caeSGleb Natapov 		kvm_arch_vcpu_destroy(vcpu);
506988a2caeSGleb Natapov 
507988a2caeSGleb Natapov 	mutex_lock(&kvm->lock);
508988a2caeSGleb Natapov 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
509d329c035SChristian Borntraeger 		kvm->vcpus[i] = NULL;
510988a2caeSGleb Natapov 
511988a2caeSGleb Natapov 	atomic_set(&kvm->online_vcpus, 0);
512988a2caeSGleb Natapov 	mutex_unlock(&kvm->lock);
513d329c035SChristian Borntraeger }
514d329c035SChristian Borntraeger 
515ad8ba2cdSSheng Yang void kvm_arch_sync_events(struct kvm *kvm)
516ad8ba2cdSSheng Yang {
517ad8ba2cdSSheng Yang }
518ad8ba2cdSSheng Yang 
519b0c632dbSHeiko Carstens void kvm_arch_destroy_vm(struct kvm *kvm)
520b0c632dbSHeiko Carstens {
521d329c035SChristian Borntraeger 	kvm_free_vcpus(kvm);
522b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
523d329c035SChristian Borntraeger 	debug_unregister(kvm->arch.dbf);
52427e0393fSCarsten Otte 	if (!kvm_is_ucontrol(kvm))
525598841caSCarsten Otte 		gmap_free(kvm->arch.gmap);
526841b91c5SCornelia Huck 	kvm_s390_destroy_adapters(kvm);
52767335e63SChristian Borntraeger 	kvm_s390_clear_float_irqs(kvm);
528b0c632dbSHeiko Carstens }
529b0c632dbSHeiko Carstens 
530b0c632dbSHeiko Carstens /* Section: vcpu related */
531b0c632dbSHeiko Carstens int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
532b0c632dbSHeiko Carstens {
5333c038e6bSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
5343c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
53527e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm)) {
53627e0393fSCarsten Otte 		vcpu->arch.gmap = gmap_alloc(current->mm);
53727e0393fSCarsten Otte 		if (!vcpu->arch.gmap)
53827e0393fSCarsten Otte 			return -ENOMEM;
5392c70fe44SChristian Borntraeger 		vcpu->arch.gmap->private = vcpu->kvm;
54027e0393fSCarsten Otte 		return 0;
54127e0393fSCarsten Otte 	}
54227e0393fSCarsten Otte 
543598841caSCarsten Otte 	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
54459674c1aSChristian Borntraeger 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
54559674c1aSChristian Borntraeger 				    KVM_SYNC_GPRS |
5469eed0735SChristian Borntraeger 				    KVM_SYNC_ACRS |
5479eed0735SChristian Borntraeger 				    KVM_SYNC_CRS;
548b0c632dbSHeiko Carstens 	return 0;
549b0c632dbSHeiko Carstens }
550b0c632dbSHeiko Carstens 
551b0c632dbSHeiko Carstens void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
552b0c632dbSHeiko Carstens {
5536692cef3SChristian Borntraeger 	/* Nothing to do */
554b0c632dbSHeiko Carstens }
555b0c632dbSHeiko Carstens 
556b0c632dbSHeiko Carstens void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
557b0c632dbSHeiko Carstens {
5584725c860SMartin Schwidefsky 	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
5594725c860SMartin Schwidefsky 	save_fp_regs(vcpu->arch.host_fpregs.fprs);
560b0c632dbSHeiko Carstens 	save_access_regs(vcpu->arch.host_acrs);
5614725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
5624725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
56359674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
564480e5926SChristian Borntraeger 	gmap_enable(vcpu->arch.gmap);
5659e6dabefSCornelia Huck 	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
566b0c632dbSHeiko Carstens }
567b0c632dbSHeiko Carstens 
568b0c632dbSHeiko Carstens void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
569b0c632dbSHeiko Carstens {
5709e6dabefSCornelia Huck 	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
571480e5926SChristian Borntraeger 	gmap_disable(vcpu->arch.gmap);
5724725c860SMartin Schwidefsky 	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
5734725c860SMartin Schwidefsky 	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
57459674c1aSChristian Borntraeger 	save_access_regs(vcpu->run->s.regs.acrs);
5754725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
5764725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
577b0c632dbSHeiko Carstens 	restore_access_regs(vcpu->arch.host_acrs);
578b0c632dbSHeiko Carstens }
579b0c632dbSHeiko Carstens 
580b0c632dbSHeiko Carstens static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
581b0c632dbSHeiko Carstens {
582b0c632dbSHeiko Carstens 	/* this equals initial cpu reset in pop, but we don't switch to ESA */
583b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.mask = 0UL;
584b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.addr = 0UL;
5858d26cf7bSChristian Borntraeger 	kvm_s390_set_prefix(vcpu, 0);
586b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->cputm     = 0UL;
587b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->ckc       = 0UL;
588b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->todpr     = 0;
589b0c632dbSHeiko Carstens 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
590b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
591b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
592b0c632dbSHeiko Carstens 	vcpu->arch.guest_fpregs.fpc = 0;
593b0c632dbSHeiko Carstens 	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
594b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gbea = 1;
595672550fbSChristian Borntraeger 	vcpu->arch.sie_block->pp = 0;
5963c038e6bSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
5973c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
5986852d7b6SDavid Hildenbrand 	kvm_s390_vcpu_stop(vcpu);
5992ed10cc1SJens Freimann 	kvm_s390_clear_local_irqs(vcpu);
600b0c632dbSHeiko Carstens }
601b0c632dbSHeiko Carstens 
60242897d86SMarcelo Tosatti int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
60342897d86SMarcelo Tosatti {
60442897d86SMarcelo Tosatti 	return 0;
60542897d86SMarcelo Tosatti }
60642897d86SMarcelo Tosatti 
607b31605c1SDominik Dingel void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
608b31605c1SDominik Dingel {
609b31605c1SDominik Dingel 	free_page(vcpu->arch.sie_block->cbrlo);
610b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = 0;
611b31605c1SDominik Dingel }
612b31605c1SDominik Dingel 
613b31605c1SDominik Dingel int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
614b31605c1SDominik Dingel {
615b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
616b31605c1SDominik Dingel 	if (!vcpu->arch.sie_block->cbrlo)
617b31605c1SDominik Dingel 		return -ENOMEM;
618b31605c1SDominik Dingel 
619b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 |= 0x80;
620b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 &= ~0x08;
621b31605c1SDominik Dingel 	return 0;
622b31605c1SDominik Dingel }
623b31605c1SDominik Dingel 
624b0c632dbSHeiko Carstens int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
625b0c632dbSHeiko Carstens {
626b31605c1SDominik Dingel 	int rc = 0;
627b31288faSKonstantin Weitz 
6289e6dabefSCornelia Huck 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
6299e6dabefSCornelia Huck 						    CPUSTAT_SM |
63069d0d3a3SChristian Borntraeger 						    CPUSTAT_STOPPED |
63169d0d3a3SChristian Borntraeger 						    CPUSTAT_GED);
632fc34531dSChristian Borntraeger 	vcpu->arch.sie_block->ecb   = 6;
6337feb6bb8SMichael Mueller 	if (test_vfacility(50) && test_vfacility(73))
6347feb6bb8SMichael Mueller 		vcpu->arch.sie_block->ecb |= 0x10;
6357feb6bb8SMichael Mueller 
63669d0d3a3SChristian Borntraeger 	vcpu->arch.sie_block->ecb2  = 8;
6374953919fSDavid Hildenbrand 	vcpu->arch.sie_block->eca   = 0xD1002000U;
638217a4406SHeiko Carstens 	if (sclp_has_siif())
639217a4406SHeiko Carstens 		vcpu->arch.sie_block->eca |= 1;
64078c4b59fSMichael Mueller 	vcpu->arch.sie_block->fac   = (int) (long) vfacilities;
6415a5e6536SMatthew Rosato 	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
6425a5e6536SMatthew Rosato 				      ICTL_TPROT;
6435a5e6536SMatthew Rosato 
644b31605c1SDominik Dingel 	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
645b31605c1SDominik Dingel 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
646b31605c1SDominik Dingel 		if (rc)
647b31605c1SDominik Dingel 			return rc;
648b31288faSKonstantin Weitz 	}
649ca872302SChristian Borntraeger 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
650ca872302SChristian Borntraeger 	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
651ba5c1e9bSCarsten Otte 		     (unsigned long) vcpu);
652ca872302SChristian Borntraeger 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
653453423dcSChristian Borntraeger 	get_cpu_id(&vcpu->arch.cpu_id);
65492e6ecf3SChristian Borntraeger 	vcpu->arch.cpu_id.version = 0xff;
655b31605c1SDominik Dingel 	return rc;
656b0c632dbSHeiko Carstens }
657b0c632dbSHeiko Carstens 
658b0c632dbSHeiko Carstens struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
659b0c632dbSHeiko Carstens 				      unsigned int id)
660b0c632dbSHeiko Carstens {
6614d47555aSCarsten Otte 	struct kvm_vcpu *vcpu;
6627feb6bb8SMichael Mueller 	struct sie_page *sie_page;
6634d47555aSCarsten Otte 	int rc = -EINVAL;
664b0c632dbSHeiko Carstens 
6654d47555aSCarsten Otte 	if (id >= KVM_MAX_VCPUS)
6664d47555aSCarsten Otte 		goto out;
6674d47555aSCarsten Otte 
6684d47555aSCarsten Otte 	rc = -ENOMEM;
6694d47555aSCarsten Otte 
670b110feafSMichael Mueller 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
671b0c632dbSHeiko Carstens 	if (!vcpu)
6724d47555aSCarsten Otte 		goto out;
673b0c632dbSHeiko Carstens 
6747feb6bb8SMichael Mueller 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
6757feb6bb8SMichael Mueller 	if (!sie_page)
676b0c632dbSHeiko Carstens 		goto out_free_cpu;
677b0c632dbSHeiko Carstens 
6787feb6bb8SMichael Mueller 	vcpu->arch.sie_block = &sie_page->sie_block;
6797feb6bb8SMichael Mueller 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
6807feb6bb8SMichael Mueller 
681b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icpua = id;
68258f9460bSCarsten Otte 	if (!kvm_is_ucontrol(kvm)) {
68358f9460bSCarsten Otte 		if (!kvm->arch.sca) {
68458f9460bSCarsten Otte 			WARN_ON_ONCE(1);
68558f9460bSCarsten Otte 			goto out_free_cpu;
68658f9460bSCarsten Otte 		}
687abf4a71eSCarsten Otte 		if (!kvm->arch.sca->cpu[id].sda)
68858f9460bSCarsten Otte 			kvm->arch.sca->cpu[id].sda =
68958f9460bSCarsten Otte 				(__u64) vcpu->arch.sie_block;
69058f9460bSCarsten Otte 		vcpu->arch.sie_block->scaoh =
69158f9460bSCarsten Otte 			(__u32)(((__u64)kvm->arch.sca) >> 32);
692b0c632dbSHeiko Carstens 		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
693fc34531dSChristian Borntraeger 		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
69458f9460bSCarsten Otte 	}
695b0c632dbSHeiko Carstens 
696ba5c1e9bSCarsten Otte 	spin_lock_init(&vcpu->arch.local_int.lock);
697ba5c1e9bSCarsten Otte 	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
698ba5c1e9bSCarsten Otte 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
699d0321a24SChristian Borntraeger 	vcpu->arch.local_int.wq = &vcpu->wq;
7005288fbf0SChristian Borntraeger 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
701ba5c1e9bSCarsten Otte 
702b0c632dbSHeiko Carstens 	rc = kvm_vcpu_init(vcpu, kvm, id);
703b0c632dbSHeiko Carstens 	if (rc)
7047b06bf2fSWei Yongjun 		goto out_free_sie_block;
705b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
706b0c632dbSHeiko Carstens 		 vcpu->arch.sie_block);
707ade38c31SCornelia Huck 	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
708b0c632dbSHeiko Carstens 
709b0c632dbSHeiko Carstens 	return vcpu;
7107b06bf2fSWei Yongjun out_free_sie_block:
7117b06bf2fSWei Yongjun 	free_page((unsigned long)(vcpu->arch.sie_block));
712b0c632dbSHeiko Carstens out_free_cpu:
713b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
7144d47555aSCarsten Otte out:
715b0c632dbSHeiko Carstens 	return ERR_PTR(rc);
716b0c632dbSHeiko Carstens }
717b0c632dbSHeiko Carstens 
718b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
719b0c632dbSHeiko Carstens {
720f87618e8SMichael Mueller 	return kvm_cpu_has_interrupt(vcpu);
721b0c632dbSHeiko Carstens }
722b0c632dbSHeiko Carstens 
72349b99e1eSChristian Borntraeger void s390_vcpu_block(struct kvm_vcpu *vcpu)
72449b99e1eSChristian Borntraeger {
72549b99e1eSChristian Borntraeger 	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
72649b99e1eSChristian Borntraeger }
72749b99e1eSChristian Borntraeger 
72849b99e1eSChristian Borntraeger void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
72949b99e1eSChristian Borntraeger {
73049b99e1eSChristian Borntraeger 	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
73149b99e1eSChristian Borntraeger }
73249b99e1eSChristian Borntraeger 
73349b99e1eSChristian Borntraeger /*
73449b99e1eSChristian Borntraeger  * Kick a guest cpu out of SIE and wait until SIE is not running.
73549b99e1eSChristian Borntraeger  * If the CPU is not running (e.g. waiting as idle) the function will
73649b99e1eSChristian Borntraeger  * return immediately. */
73749b99e1eSChristian Borntraeger void exit_sie(struct kvm_vcpu *vcpu)
73849b99e1eSChristian Borntraeger {
73949b99e1eSChristian Borntraeger 	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
74049b99e1eSChristian Borntraeger 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
74149b99e1eSChristian Borntraeger 		cpu_relax();
74249b99e1eSChristian Borntraeger }
74349b99e1eSChristian Borntraeger 
74449b99e1eSChristian Borntraeger /* Kick a guest cpu out of SIE and prevent SIE-reentry */
74549b99e1eSChristian Borntraeger void exit_sie_sync(struct kvm_vcpu *vcpu)
74649b99e1eSChristian Borntraeger {
74749b99e1eSChristian Borntraeger 	s390_vcpu_block(vcpu);
74849b99e1eSChristian Borntraeger 	exit_sie(vcpu);
74949b99e1eSChristian Borntraeger }
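/*
 * Usage note (illustrative, not part of the original file): the usual
 * pattern is to queue a request and then force the vcpu out of SIE so
 * the request is seen before guest execution resumes, as in
 * kvm_gmap_notifier() below:
 *
 *	kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
 *	exit_sie_sync(vcpu);
 *
 * The PROG_BLOCK_SIE bit set by s390_vcpu_block() prevents SIE re-entry
 * until the request loop in kvm_s390_handle_requests() runs and calls
 * s390_vcpu_unblock().
 */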
75049b99e1eSChristian Borntraeger 
7512c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
7522c70fe44SChristian Borntraeger {
7532c70fe44SChristian Borntraeger 	int i;
7542c70fe44SChristian Borntraeger 	struct kvm *kvm = gmap->private;
7552c70fe44SChristian Borntraeger 	struct kvm_vcpu *vcpu;
7562c70fe44SChristian Borntraeger 
7572c70fe44SChristian Borntraeger 	kvm_for_each_vcpu(i, vcpu, kvm) {
7582c70fe44SChristian Borntraeger 		/* match against both prefix pages */
759fda902cbSMichael Mueller 		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
7602c70fe44SChristian Borntraeger 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
7612c70fe44SChristian Borntraeger 			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
7622c70fe44SChristian Borntraeger 			exit_sie_sync(vcpu);
7632c70fe44SChristian Borntraeger 		}
7642c70fe44SChristian Borntraeger 	}
7652c70fe44SChristian Borntraeger }
7662c70fe44SChristian Borntraeger 
767b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
768b6d33834SChristoffer Dall {
769b6d33834SChristoffer Dall 	/* kvm common code refers to this, but never calls it */
770b6d33834SChristoffer Dall 	BUG();
771b6d33834SChristoffer Dall 	return 0;
772b6d33834SChristoffer Dall }
773b6d33834SChristoffer Dall 
77414eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
77514eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
77614eebd91SCarsten Otte {
77714eebd91SCarsten Otte 	int r = -EINVAL;
77814eebd91SCarsten Otte 
77914eebd91SCarsten Otte 	switch (reg->id) {
78029b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
78129b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->todpr,
78229b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
78329b7c71bSCarsten Otte 		break;
78429b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
78529b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->epoch,
78629b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
78729b7c71bSCarsten Otte 		break;
78846a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
78946a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->cputm,
79046a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
79146a6dd1cSJason J. herne 		break;
79246a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
79346a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->ckc,
79446a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
79546a6dd1cSJason J. herne 		break;
796536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
797536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_token,
798536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
799536336c2SDominik Dingel 		break;
800536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
801536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_compare,
802536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
803536336c2SDominik Dingel 		break;
804536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
805536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_select,
806536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
807536336c2SDominik Dingel 		break;
808672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
809672550fbSChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->pp,
810672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
811672550fbSChristian Borntraeger 		break;
812afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
813afa45ff5SChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->gbea,
814afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
815afa45ff5SChristian Borntraeger 		break;
81614eebd91SCarsten Otte 	default:
81714eebd91SCarsten Otte 		break;
81814eebd91SCarsten Otte 	}
81914eebd91SCarsten Otte 
82014eebd91SCarsten Otte 	return r;
82114eebd91SCarsten Otte }
82214eebd91SCarsten Otte 
82314eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
82414eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
82514eebd91SCarsten Otte {
82614eebd91SCarsten Otte 	int r = -EINVAL;
82714eebd91SCarsten Otte 
82814eebd91SCarsten Otte 	switch (reg->id) {
82929b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
83029b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->todpr,
83129b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
83229b7c71bSCarsten Otte 		break;
83329b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
83429b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->epoch,
83529b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
83629b7c71bSCarsten Otte 		break;
83746a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
83846a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->cputm,
83946a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
84046a6dd1cSJason J. herne 		break;
84146a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
84246a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->ckc,
84346a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
84446a6dd1cSJason J. herne 		break;
845536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
846536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_token,
847536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
848536336c2SDominik Dingel 		break;
849536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
850536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_compare,
851536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
852536336c2SDominik Dingel 		break;
853536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
854536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_select,
855536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
856536336c2SDominik Dingel 		break;
857672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
858672550fbSChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->pp,
859672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
860672550fbSChristian Borntraeger 		break;
861afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
862afa45ff5SChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->gbea,
863afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
864afa45ff5SChristian Borntraeger 		break;
86514eebd91SCarsten Otte 	default:
86614eebd91SCarsten Otte 		break;
86714eebd91SCarsten Otte 	}
86814eebd91SCarsten Otte 
86914eebd91SCarsten Otte 	return r;
87014eebd91SCarsten Otte }
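/*
 * Illustrative sketch (not part of the original file): these handlers
 * back the KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls (KVM_CAP_ONE_REG
 * is advertised above), e.g. reading the guest CPU timer:
 *
 *	__u64 cputm;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&cputm,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 *
 * vcpu_fd is the per-vcpu file descriptor; unknown register ids yield
 * -EINVAL.
 */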
871b6d33834SChristoffer Dall 
872b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
873b0c632dbSHeiko Carstens {
874b0c632dbSHeiko Carstens 	kvm_s390_vcpu_initial_reset(vcpu);
875b0c632dbSHeiko Carstens 	return 0;
876b0c632dbSHeiko Carstens }
877b0c632dbSHeiko Carstens 
878b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
879b0c632dbSHeiko Carstens {
8805a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
881b0c632dbSHeiko Carstens 	return 0;
882b0c632dbSHeiko Carstens }
883b0c632dbSHeiko Carstens 
884b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
885b0c632dbSHeiko Carstens {
8865a32c1afSChristian Borntraeger 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
887b0c632dbSHeiko Carstens 	return 0;
888b0c632dbSHeiko Carstens }
889b0c632dbSHeiko Carstens 
890b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
891b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
892b0c632dbSHeiko Carstens {
89359674c1aSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
894b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
89559674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
896b0c632dbSHeiko Carstens 	return 0;
897b0c632dbSHeiko Carstens }
898b0c632dbSHeiko Carstens 
899b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
900b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
901b0c632dbSHeiko Carstens {
90259674c1aSChristian Borntraeger 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
903b0c632dbSHeiko Carstens 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
904b0c632dbSHeiko Carstens 	return 0;
905b0c632dbSHeiko Carstens }
906b0c632dbSHeiko Carstens 
907b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
908b0c632dbSHeiko Carstens {
9094725c860SMartin Schwidefsky 	if (test_fp_ctl(fpu->fpc))
9104725c860SMartin Schwidefsky 		return -EINVAL;
911b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
9124725c860SMartin Schwidefsky 	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
9134725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
9144725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
915b0c632dbSHeiko Carstens 	return 0;
916b0c632dbSHeiko Carstens }
917b0c632dbSHeiko Carstens 
918b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
919b0c632dbSHeiko Carstens {
920b0c632dbSHeiko Carstens 	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
921b0c632dbSHeiko Carstens 	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
922b0c632dbSHeiko Carstens 	return 0;
923b0c632dbSHeiko Carstens }
924b0c632dbSHeiko Carstens 
925b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
926b0c632dbSHeiko Carstens {
927b0c632dbSHeiko Carstens 	int rc = 0;
928b0c632dbSHeiko Carstens 
9299e6dabefSCornelia Huck 	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
930b0c632dbSHeiko Carstens 		rc = -EBUSY;
931d7b0b5ebSCarsten Otte 	else {
932d7b0b5ebSCarsten Otte 		vcpu->run->psw_mask = psw.mask;
933d7b0b5ebSCarsten Otte 		vcpu->run->psw_addr = psw.addr;
934d7b0b5ebSCarsten Otte 	}
935b0c632dbSHeiko Carstens 	return rc;
936b0c632dbSHeiko Carstens }
937b0c632dbSHeiko Carstens 
938b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
939b0c632dbSHeiko Carstens 				  struct kvm_translation *tr)
940b0c632dbSHeiko Carstens {
941b0c632dbSHeiko Carstens 	return -EINVAL; /* not implemented yet */
942b0c632dbSHeiko Carstens }
943b0c632dbSHeiko Carstens 
94427291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
94527291e21SDavid Hildenbrand 			      KVM_GUESTDBG_USE_HW_BP | \
94627291e21SDavid Hildenbrand 			      KVM_GUESTDBG_ENABLE)
94727291e21SDavid Hildenbrand 
948d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
949d0bfb940SJan Kiszka 					struct kvm_guest_debug *dbg)
950b0c632dbSHeiko Carstens {
95127291e21SDavid Hildenbrand 	int rc = 0;
95227291e21SDavid Hildenbrand 
95327291e21SDavid Hildenbrand 	vcpu->guest_debug = 0;
95427291e21SDavid Hildenbrand 	kvm_s390_clear_bp_data(vcpu);
95527291e21SDavid Hildenbrand 
9562de3bfc2SDavid Hildenbrand 	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
95727291e21SDavid Hildenbrand 		return -EINVAL;
95827291e21SDavid Hildenbrand 
95927291e21SDavid Hildenbrand 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
96027291e21SDavid Hildenbrand 		vcpu->guest_debug = dbg->control;
96127291e21SDavid Hildenbrand 		/* enforce guest PER */
96227291e21SDavid Hildenbrand 		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
96327291e21SDavid Hildenbrand 
96427291e21SDavid Hildenbrand 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
96527291e21SDavid Hildenbrand 			rc = kvm_s390_import_bp_data(vcpu, dbg);
96627291e21SDavid Hildenbrand 	} else {
96727291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
96827291e21SDavid Hildenbrand 		vcpu->arch.guestdbg.last_bp = 0;
96927291e21SDavid Hildenbrand 	}
97027291e21SDavid Hildenbrand 
97127291e21SDavid Hildenbrand 	if (rc) {
97227291e21SDavid Hildenbrand 		vcpu->guest_debug = 0;
97327291e21SDavid Hildenbrand 		kvm_s390_clear_bp_data(vcpu);
97427291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
97527291e21SDavid Hildenbrand 	}
97627291e21SDavid Hildenbrand 
97727291e21SDavid Hildenbrand 	return rc;
978b0c632dbSHeiko Carstens }
979b0c632dbSHeiko Carstens 
98062d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
98162d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
98262d9f0dbSMarcelo Tosatti {
98362d9f0dbSMarcelo Tosatti 	return -EINVAL; /* not implemented yet */
98462d9f0dbSMarcelo Tosatti }
98562d9f0dbSMarcelo Tosatti 
98662d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
98762d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
98862d9f0dbSMarcelo Tosatti {
98962d9f0dbSMarcelo Tosatti 	return -EINVAL; /* not implemented yet */
99062d9f0dbSMarcelo Tosatti }
99162d9f0dbSMarcelo Tosatti 
992b31605c1SDominik Dingel bool kvm_s390_cmma_enabled(struct kvm *kvm)
993b31605c1SDominik Dingel {
994b31605c1SDominik Dingel 	if (!MACHINE_IS_LPAR)
995b31605c1SDominik Dingel 		return false;
996b31605c1SDominik Dingel 	/* only enable for z10 and later */
997b31605c1SDominik Dingel 	if (!MACHINE_HAS_EDAT1)
998b31605c1SDominik Dingel 		return false;
999b31605c1SDominik Dingel 	if (!kvm->arch.use_cmma)
1000b31605c1SDominik Dingel 		return false;
1001b31605c1SDominik Dingel 	return true;
1002b31605c1SDominik Dingel }
1003b31605c1SDominik Dingel 
10048ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu)
10058ad35755SDavid Hildenbrand {
10068ad35755SDavid Hildenbrand 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
10078ad35755SDavid Hildenbrand }
10088ad35755SDavid Hildenbrand 
10092c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
10102c70fe44SChristian Borntraeger {
10118ad35755SDavid Hildenbrand retry:
10128ad35755SDavid Hildenbrand 	s390_vcpu_unblock(vcpu);
10132c70fe44SChristian Borntraeger 	/*
10142c70fe44SChristian Borntraeger 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
10152c70fe44SChristian Borntraeger 	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
10162c70fe44SChristian Borntraeger 	 * This ensures that the ipte instruction for this request has
10172c70fe44SChristian Borntraeger 	 * already finished. We might race against a second unmapper that
10182c70fe44SChristian Borntraeger 	 * wants to set the blocking bit. Let's just retry the request loop.
10192c70fe44SChristian Borntraeger 	 */
10208ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
10212c70fe44SChristian Borntraeger 		int rc;
10222c70fe44SChristian Borntraeger 		rc = gmap_ipte_notify(vcpu->arch.gmap,
1023fda902cbSMichael Mueller 				      kvm_s390_get_prefix(vcpu),
10242c70fe44SChristian Borntraeger 				      PAGE_SIZE * 2);
10252c70fe44SChristian Borntraeger 		if (rc)
10262c70fe44SChristian Borntraeger 			return rc;
10278ad35755SDavid Hildenbrand 		goto retry;
10282c70fe44SChristian Borntraeger 	}
10298ad35755SDavid Hildenbrand 
10308ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
10318ad35755SDavid Hildenbrand 		if (!ibs_enabled(vcpu)) {
10328ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
10338ad35755SDavid Hildenbrand 			atomic_set_mask(CPUSTAT_IBS,
10348ad35755SDavid Hildenbrand 					&vcpu->arch.sie_block->cpuflags);
10358ad35755SDavid Hildenbrand 		}
10368ad35755SDavid Hildenbrand 		goto retry;
10378ad35755SDavid Hildenbrand 	}
10388ad35755SDavid Hildenbrand 
10398ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
10408ad35755SDavid Hildenbrand 		if (ibs_enabled(vcpu)) {
10418ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
10428ad35755SDavid Hildenbrand 			atomic_clear_mask(CPUSTAT_IBS,
10438ad35755SDavid Hildenbrand 					  &vcpu->arch.sie_block->cpuflags);
10448ad35755SDavid Hildenbrand 		}
10458ad35755SDavid Hildenbrand 		goto retry;
10468ad35755SDavid Hildenbrand 	}
10478ad35755SDavid Hildenbrand 
10482c70fe44SChristian Borntraeger 	return 0;
10492c70fe44SChristian Borntraeger }
10502c70fe44SChristian Borntraeger 
1051fa576c58SThomas Huth /**
1052fa576c58SThomas Huth  * kvm_arch_fault_in_page - fault-in guest page if necessary
1053fa576c58SThomas Huth  * @vcpu: The corresponding virtual cpu
1054fa576c58SThomas Huth  * @gpa: Guest physical address
1055fa576c58SThomas Huth  * @writable: Whether the page should be writable or not
1056fa576c58SThomas Huth  *
1057fa576c58SThomas Huth  * Make sure that a guest page has been faulted-in on the host.
1058fa576c58SThomas Huth  *
1059fa576c58SThomas Huth  * Return: Zero on success, negative error code otherwise.
1060fa576c58SThomas Huth  */
1061fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
106224eb3a82SDominik Dingel {
106324eb3a82SDominik Dingel 	struct mm_struct *mm = current->mm;
1064fa576c58SThomas Huth 	hva_t hva;
1065fa576c58SThomas Huth 	long rc;
1066fa576c58SThomas Huth 
1067fa576c58SThomas Huth 	hva = gmap_fault(gpa, vcpu->arch.gmap);
1068fa576c58SThomas Huth 	if (IS_ERR_VALUE(hva))
1069fa576c58SThomas Huth 		return (long)hva;
107024eb3a82SDominik Dingel 	down_read(&mm->mmap_sem);
1071fa576c58SThomas Huth 	rc = get_user_pages(current, mm, hva, 1, writable, 0, NULL, NULL);
107224eb3a82SDominik Dingel 	up_read(&mm->mmap_sem);
1073fa576c58SThomas Huth 
1074fa576c58SThomas Huth 	return rc < 0 ? rc : 0;
107524eb3a82SDominik Dingel }
107624eb3a82SDominik Dingel 
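/*
 * Forward a pfault token to the guest: PFAULT_INIT is injected on the
 * VCPU when the host starts handling a fault asynchronously, PFAULT_DONE
 * is injected on the VM once the page has become available.
 */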
10773c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
10783c038e6bSDominik Dingel 				      unsigned long token)
10793c038e6bSDominik Dingel {
10803c038e6bSDominik Dingel 	struct kvm_s390_interrupt inti;
10813c038e6bSDominik Dingel 	inti.parm64 = token;
10823c038e6bSDominik Dingel 
10833c038e6bSDominik Dingel 	if (start_token) {
10843c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_INIT;
10853c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
10863c038e6bSDominik Dingel 	} else {
10873c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_DONE;
10883c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
10893c038e6bSDominik Dingel 	}
10903c038e6bSDominik Dingel }
10913c038e6bSDominik Dingel 
10923c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
10933c038e6bSDominik Dingel 				     struct kvm_async_pf *work)
10943c038e6bSDominik Dingel {
10953c038e6bSDominik Dingel 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
10963c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
10973c038e6bSDominik Dingel }
10983c038e6bSDominik Dingel 
10993c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
11003c038e6bSDominik Dingel 				 struct kvm_async_pf *work)
11013c038e6bSDominik Dingel {
11023c038e6bSDominik Dingel 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
11033c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
11043c038e6bSDominik Dingel }
11053c038e6bSDominik Dingel 
11063c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
11073c038e6bSDominik Dingel 			       struct kvm_async_pf *work)
11083c038e6bSDominik Dingel {
11093c038e6bSDominik Dingel 	/* s390 will always inject the page directly */
11103c038e6bSDominik Dingel }
11113c038e6bSDominik Dingel 
11123c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
11133c038e6bSDominik Dingel {
11143c038e6bSDominik Dingel 	/*
11153c038e6bSDominik Dingel 	 * s390 will always inject the page directly,
11163c038e6bSDominik Dingel 	 * but we still want check_async_completion to clean up
11173c038e6bSDominik Dingel 	 */
11183c038e6bSDominik Dingel 	return true;
11193c038e6bSDominik Dingel }
11203c038e6bSDominik Dingel 
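/*
 * Decide whether the current host fault may be handled asynchronously.
 * This requires a valid pfault token, a PSW that matches the configured
 * select/compare mask, enabled external interrupts, the required CR0 bit
 * (0x200), no other pending interrupt and pfault enabled for this gmap.
 * A return value of 0 tells the caller to fall back to synchronous
 * fault-in.
 */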
11213c038e6bSDominik Dingel static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
11223c038e6bSDominik Dingel {
11233c038e6bSDominik Dingel 	hva_t hva;
11243c038e6bSDominik Dingel 	struct kvm_arch_async_pf arch;
11253c038e6bSDominik Dingel 	int rc;
11263c038e6bSDominik Dingel 
11273c038e6bSDominik Dingel 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
11283c038e6bSDominik Dingel 		return 0;
11293c038e6bSDominik Dingel 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
11303c038e6bSDominik Dingel 	    vcpu->arch.pfault_compare)
11313c038e6bSDominik Dingel 		return 0;
11323c038e6bSDominik Dingel 	if (psw_extint_disabled(vcpu))
11333c038e6bSDominik Dingel 		return 0;
11343c038e6bSDominik Dingel 	if (kvm_cpu_has_interrupt(vcpu))
11353c038e6bSDominik Dingel 		return 0;
11363c038e6bSDominik Dingel 	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
11373c038e6bSDominik Dingel 		return 0;
11383c038e6bSDominik Dingel 	if (!vcpu->arch.gmap->pfault_enabled)
11393c038e6bSDominik Dingel 		return 0;
11403c038e6bSDominik Dingel 
114181480cc1SHeiko Carstens 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
114281480cc1SHeiko Carstens 	hva += current->thread.gmap_addr & ~PAGE_MASK;
114381480cc1SHeiko Carstens 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
11443c038e6bSDominik Dingel 		return 0;
11453c038e6bSDominik Dingel 
11463c038e6bSDominik Dingel 	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
11473c038e6bSDominik Dingel 	return rc;
11483c038e6bSDominik Dingel }
11493c038e6bSDominik Dingel 
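/*
 * Prepare a VCPU for entering SIE: finish completed async pfaults, give
 * the scheduler and the machine-check handler a chance to run, deliver
 * pending interrupts (unless this is a ucontrol VM), process outstanding
 * requests and, with guest debugging active, patch the PER control
 * registers.
 */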
11503fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu)
1151b0c632dbSHeiko Carstens {
11523fb4c40fSThomas Huth 	int rc, cpuflags;
1153e168bf8dSCarsten Otte 
11543c038e6bSDominik Dingel 	/*
11553c038e6bSDominik Dingel 	 * On s390, notifications for arriving pages will be delivered directly
11563c038e6bSDominik Dingel 	 * to the guest, but the housekeeping for completed pfaults is
11573c038e6bSDominik Dingel 	 * handled outside the worker.
11583c038e6bSDominik Dingel 	 */
11593c038e6bSDominik Dingel 	kvm_check_async_pf_completion(vcpu);
11603c038e6bSDominik Dingel 
11615a32c1afSChristian Borntraeger 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
1162b0c632dbSHeiko Carstens 
1163b0c632dbSHeiko Carstens 	if (need_resched())
1164b0c632dbSHeiko Carstens 		schedule();
1165b0c632dbSHeiko Carstens 
1166d3a73acbSMartin Schwidefsky 	if (test_cpu_flag(CIF_MCCK_PENDING))
116771cde587SChristian Borntraeger 		s390_handle_mcck();
116871cde587SChristian Borntraeger 
1169d6b6d166SCarsten Otte 	if (!kvm_is_ucontrol(vcpu->kvm))
11700ff31867SCarsten Otte 		kvm_s390_deliver_pending_interrupts(vcpu);
11710ff31867SCarsten Otte 
11722c70fe44SChristian Borntraeger 	rc = kvm_s390_handle_requests(vcpu);
11732c70fe44SChristian Borntraeger 	if (rc)
11742c70fe44SChristian Borntraeger 		return rc;
11752c70fe44SChristian Borntraeger 
117627291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu)) {
117727291e21SDavid Hildenbrand 		kvm_s390_backup_guest_per_regs(vcpu);
117827291e21SDavid Hildenbrand 		kvm_s390_patch_guest_per_regs(vcpu);
117927291e21SDavid Hildenbrand 	}
118027291e21SDavid Hildenbrand 
1181b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icptcode = 0;
11823fb4c40fSThomas Huth 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
11833fb4c40fSThomas Huth 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
11843fb4c40fSThomas Huth 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
11852b29a9fdSDominik Dingel 
11863fb4c40fSThomas Huth 	return 0;
11873fb4c40fSThomas Huth }
11883fb4c40fSThomas Huth 
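/*
 * Post-process a SIE exit. A negative exit_reason means the host could
 * not enter SIE: ucontrol VMs get the fault reported to userspace as a
 * translation exception, a guest page fault is handled via an async
 * pfault or a synchronous fault-in, and anything else is injected as an
 * addressing exception. For regular exits the intercept is handled in
 * the kernel where possible.
 */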
11893fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
11903fb4c40fSThomas Huth {
119124eb3a82SDominik Dingel 	int rc = -1;
11922b29a9fdSDominik Dingel 
11932b29a9fdSDominik Dingel 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
11942b29a9fdSDominik Dingel 		   vcpu->arch.sie_block->icptcode);
11952b29a9fdSDominik Dingel 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
11962b29a9fdSDominik Dingel 
119727291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu))
119827291e21SDavid Hildenbrand 		kvm_s390_restore_guest_per_regs(vcpu);
119927291e21SDavid Hildenbrand 
12003fb4c40fSThomas Huth 	if (exit_reason >= 0) {
12017c470539SMartin Schwidefsky 		rc = 0;
1202210b1607SThomas Huth 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
1203210b1607SThomas Huth 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
1204210b1607SThomas Huth 		vcpu->run->s390_ucontrol.trans_exc_code =
1205210b1607SThomas Huth 						current->thread.gmap_addr;
1206210b1607SThomas Huth 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
1207210b1607SThomas Huth 		rc = -EREMOTE;
120824eb3a82SDominik Dingel 
120924eb3a82SDominik Dingel 	} else if (current->thread.gmap_pfault) {
12103c038e6bSDominik Dingel 		trace_kvm_s390_major_guest_pfault(vcpu);
121124eb3a82SDominik Dingel 		current->thread.gmap_pfault = 0;
1212fa576c58SThomas Huth 		if (kvm_arch_setup_async_pf(vcpu)) {
121324eb3a82SDominik Dingel 			rc = 0;
1214fa576c58SThomas Huth 		} else {
1215fa576c58SThomas Huth 			gpa_t gpa = current->thread.gmap_addr;
1216fa576c58SThomas Huth 			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
1217fa576c58SThomas Huth 		}
121824eb3a82SDominik Dingel 	}
121924eb3a82SDominik Dingel 
122024eb3a82SDominik Dingel 	if (rc == -1) {
1221699bde3bSChristian Borntraeger 		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
1222699bde3bSChristian Borntraeger 		trace_kvm_s390_sie_fault(vcpu);
1223699bde3bSChristian Borntraeger 		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
12241f0d0f09SCarsten Otte 	}
1225b0c632dbSHeiko Carstens 
12265a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
12273fb4c40fSThomas Huth 
1228a76ccff6SThomas Huth 	if (rc == 0) {
1229a76ccff6SThomas Huth 		if (kvm_is_ucontrol(vcpu->kvm))
12302955c83fSChristian Borntraeger 			/* Don't exit for host interrupts. */
12312955c83fSChristian Borntraeger 			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
1232a76ccff6SThomas Huth 		else
1233a76ccff6SThomas Huth 			rc = kvm_handle_sie_intercept(vcpu);
1234a76ccff6SThomas Huth 	}
1235a76ccff6SThomas Huth 
12363fb4c40fSThomas Huth 	return rc;
12373fb4c40fSThomas Huth }
12383fb4c40fSThomas Huth 
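/*
 * Main run loop: kvm->srcu is held except while the guest actually runs
 * in sie64a(). The loop ends on a pending signal, a pending guest-debug
 * exit or a non-zero return code from the pre/post handlers.
 */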
12393fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu)
12403fb4c40fSThomas Huth {
12413fb4c40fSThomas Huth 	int rc, exit_reason;
12423fb4c40fSThomas Huth 
1243800c1065SThomas Huth 	/*
1244800c1065SThomas Huth 	 * We try to hold kvm->srcu during most of vcpu_run (except when
1245800c1065SThomas Huth 	 * running the guest), so that memslots (and other stuff) are protected.
1246800c1065SThomas Huth 	 */
1247800c1065SThomas Huth 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1248800c1065SThomas Huth 
1249a76ccff6SThomas Huth 	do {
12503fb4c40fSThomas Huth 		rc = vcpu_pre_run(vcpu);
12513fb4c40fSThomas Huth 		if (rc)
1252a76ccff6SThomas Huth 			break;
12533fb4c40fSThomas Huth 
1254800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
12553fb4c40fSThomas Huth 		/*
1256a76ccff6SThomas Huth 		 * As PF_VCPU will be used in the fault handler, there must be
1257a76ccff6SThomas Huth 		 * no uaccess between guest_enter and guest_exit.
12583fb4c40fSThomas Huth 		 */
12593fb4c40fSThomas Huth 		preempt_disable();
12603fb4c40fSThomas Huth 		kvm_guest_enter();
12613fb4c40fSThomas Huth 		preempt_enable();
1262a76ccff6SThomas Huth 		exit_reason = sie64a(vcpu->arch.sie_block,
1263a76ccff6SThomas Huth 				     vcpu->run->s.regs.gprs);
12643fb4c40fSThomas Huth 		kvm_guest_exit();
1265800c1065SThomas Huth 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
12663fb4c40fSThomas Huth 
12673fb4c40fSThomas Huth 		rc = vcpu_post_run(vcpu, exit_reason);
126827291e21SDavid Hildenbrand 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
12693fb4c40fSThomas Huth 
1270800c1065SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1271e168bf8dSCarsten Otte 	return rc;
1272b0c632dbSHeiko Carstens }
1273b0c632dbSHeiko Carstens 
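/*
 * The KVM_RUN backend: synchronize the dirty parts of kvm_run into the
 * SIE block, run the VCPU and translate the result back into kvm_run
 * exit reasons (-EOPNOTSUPP becomes KVM_EXIT_S390_SIEIC for userspace,
 * -EREMOTE means the handler already prepared kvm_run).
 */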
1274b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1275b0c632dbSHeiko Carstens {
12768f2abe6aSChristian Borntraeger 	int rc;
1277b0c632dbSHeiko Carstens 	sigset_t sigsaved;
1278b0c632dbSHeiko Carstens 
127927291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu)) {
128027291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
128127291e21SDavid Hildenbrand 		return 0;
128227291e21SDavid Hildenbrand 	}
128327291e21SDavid Hildenbrand 
1284b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
1285b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1286b0c632dbSHeiko Carstens 
12876852d7b6SDavid Hildenbrand 	kvm_s390_vcpu_start(vcpu);
1288b0c632dbSHeiko Carstens 
12898f2abe6aSChristian Borntraeger 	switch (kvm_run->exit_reason) {
12908f2abe6aSChristian Borntraeger 	case KVM_EXIT_S390_SIEIC:
12918f2abe6aSChristian Borntraeger 	case KVM_EXIT_UNKNOWN:
12929ace903dSChristian Ehrhardt 	case KVM_EXIT_INTR:
12938f2abe6aSChristian Borntraeger 	case KVM_EXIT_S390_RESET:
1294e168bf8dSCarsten Otte 	case KVM_EXIT_S390_UCONTROL:
1295fa6b7fe9SCornelia Huck 	case KVM_EXIT_S390_TSCH:
129627291e21SDavid Hildenbrand 	case KVM_EXIT_DEBUG:
12978f2abe6aSChristian Borntraeger 		break;
12988f2abe6aSChristian Borntraeger 	default:
12998f2abe6aSChristian Borntraeger 		BUG();
13008f2abe6aSChristian Borntraeger 	}
13018f2abe6aSChristian Borntraeger 
1302d7b0b5ebSCarsten Otte 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
1303d7b0b5ebSCarsten Otte 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
130460b413c9SChristian Borntraeger 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
130560b413c9SChristian Borntraeger 		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
130660b413c9SChristian Borntraeger 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
130760b413c9SChristian Borntraeger 	}
13089eed0735SChristian Borntraeger 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
13099eed0735SChristian Borntraeger 		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
13109eed0735SChristian Borntraeger 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
13119eed0735SChristian Borntraeger 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
13129eed0735SChristian Borntraeger 	}
1313d7b0b5ebSCarsten Otte 
1314dab4079dSHeiko Carstens 	might_fault();
1315e168bf8dSCarsten Otte 	rc = __vcpu_run(vcpu);
13169ace903dSChristian Ehrhardt 
1317b1d16c49SChristian Ehrhardt 	if (signal_pending(current) && !rc) {
1318b1d16c49SChristian Ehrhardt 		kvm_run->exit_reason = KVM_EXIT_INTR;
13198f2abe6aSChristian Borntraeger 		rc = -EINTR;
1320b1d16c49SChristian Ehrhardt 	}
13218f2abe6aSChristian Borntraeger 
132227291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu) && !rc)  {
132327291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
132427291e21SDavid Hildenbrand 		rc = 0;
132527291e21SDavid Hildenbrand 	}
132627291e21SDavid Hildenbrand 
1327b8e660b8SHeiko Carstens 	if (rc == -EOPNOTSUPP) {
13288f2abe6aSChristian Borntraeger 		/* intercept cannot be handled in-kernel, prepare kvm-run */
13298f2abe6aSChristian Borntraeger 		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
13308f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
13318f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
13328f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
13338f2abe6aSChristian Borntraeger 		rc = 0;
13348f2abe6aSChristian Borntraeger 	}
13358f2abe6aSChristian Borntraeger 
13368f2abe6aSChristian Borntraeger 	if (rc == -EREMOTE) {
13378f2abe6aSChristian Borntraeger 		/* intercept was handled, but userspace support is needed;
13388f2abe6aSChristian Borntraeger 		 * kvm_run has been prepared by the handler */
13398f2abe6aSChristian Borntraeger 		rc = 0;
13408f2abe6aSChristian Borntraeger 	}
13418f2abe6aSChristian Borntraeger 
1342d7b0b5ebSCarsten Otte 	kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
1343d7b0b5ebSCarsten Otte 	kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;
1344fda902cbSMichael Mueller 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
13459eed0735SChristian Borntraeger 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
1346d7b0b5ebSCarsten Otte 
1347b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
1348b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1349b0c632dbSHeiko Carstens 
1350b0c632dbSHeiko Carstens 	vcpu->stat.exit_userspace++;
13517e8e6ab4SHeiko Carstens 	return rc;
1352b0c632dbSHeiko Carstens }
1353b0c632dbSHeiko Carstens 
1354b0c632dbSHeiko Carstens /*
1355b0c632dbSHeiko Carstens  * store status at address
1356b0c632dbSHeiko Carstens  * we have two special cases:
1357b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
1358b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
1359b0c632dbSHeiko Carstens  */
1360d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
1361b0c632dbSHeiko Carstens {
1362092670cdSCarsten Otte 	unsigned char archmode = 1;
1363fda902cbSMichael Mueller 	unsigned int px;
1364178bd789SThomas Huth 	u64 clkcomp;
1365d0bce605SHeiko Carstens 	int rc;
1366b0c632dbSHeiko Carstens 
1367d0bce605SHeiko Carstens 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
1368d0bce605SHeiko Carstens 		if (write_guest_abs(vcpu, 163, &archmode, 1))
1369b0c632dbSHeiko Carstens 			return -EFAULT;
1370d0bce605SHeiko Carstens 		gpa = SAVE_AREA_BASE;
1371d0bce605SHeiko Carstens 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
1372d0bce605SHeiko Carstens 		if (write_guest_real(vcpu, 163, &archmode, 1))
1373b0c632dbSHeiko Carstens 			return -EFAULT;
1374d0bce605SHeiko Carstens 		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
1375d0bce605SHeiko Carstens 	}
1376d0bce605SHeiko Carstens 	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
1377d0bce605SHeiko Carstens 			     vcpu->arch.guest_fpregs.fprs, 128);
1378d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
1379d0bce605SHeiko Carstens 			      vcpu->run->s.regs.gprs, 128);
1380d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
1381d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gpsw, 16);
1382fda902cbSMichael Mueller 	px = kvm_s390_get_prefix(vcpu);
1383d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
1384fda902cbSMichael Mueller 			      &px, 4);
1385d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu,
1386d0bce605SHeiko Carstens 			      gpa + offsetof(struct save_area, fp_ctrl_reg),
1387d0bce605SHeiko Carstens 			      &vcpu->arch.guest_fpregs.fpc, 4);
1388d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
1389d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->todpr, 4);
1390d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
1391d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->cputm, 8);
1392178bd789SThomas Huth 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
1393d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
1394d0bce605SHeiko Carstens 			      &clkcomp, 8);
1395d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
1396d0bce605SHeiko Carstens 			      &vcpu->run->s.regs.acrs, 64);
1397d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
1398d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gcr, 128);
1399d0bce605SHeiko Carstens 	return rc ? -EFAULT : 0;
1400b0c632dbSHeiko Carstens }
1401b0c632dbSHeiko Carstens 
1402e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
1403e879892cSThomas Huth {
1404e879892cSThomas Huth 	/*
1405e879892cSThomas Huth 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
1406e879892cSThomas Huth 	 * copying in vcpu load/put. Let's update our copies before we save
1407e879892cSThomas Huth 	 * them into the save area.
1408e879892cSThomas Huth 	 */
1409e879892cSThomas Huth 	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
1410e879892cSThomas Huth 	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
1411e879892cSThomas Huth 	save_access_regs(vcpu->run->s.regs.acrs);
1412e879892cSThomas Huth 
1413e879892cSThomas Huth 	return kvm_s390_store_status_unloaded(vcpu, addr);
1414e879892cSThomas Huth }
1415e879892cSThomas Huth 
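/*
 * Helpers for the STOPPED state and the IBS facility. Enabling or
 * disabling IBS is done via a request on the target VCPU plus
 * exit_sie_sync(), so the change is applied in kvm_s390_handle_requests()
 * before that VCPU re-enters SIE.
 */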
14168ad35755SDavid Hildenbrand static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
14178ad35755SDavid Hildenbrand {
14188ad35755SDavid Hildenbrand 	return atomic_read(&(vcpu)->arch.sie_block->cpuflags) & CPUSTAT_STOPPED;
14198ad35755SDavid Hildenbrand }
14208ad35755SDavid Hildenbrand 
14218ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
14228ad35755SDavid Hildenbrand {
14238ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
14248ad35755SDavid Hildenbrand 	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
14258ad35755SDavid Hildenbrand 	exit_sie_sync(vcpu);
14268ad35755SDavid Hildenbrand }
14278ad35755SDavid Hildenbrand 
14288ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
14298ad35755SDavid Hildenbrand {
14308ad35755SDavid Hildenbrand 	unsigned int i;
14318ad35755SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
14328ad35755SDavid Hildenbrand 
14338ad35755SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
14348ad35755SDavid Hildenbrand 		__disable_ibs_on_vcpu(vcpu);
14358ad35755SDavid Hildenbrand 	}
14368ad35755SDavid Hildenbrand }
14378ad35755SDavid Hildenbrand 
14388ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
14398ad35755SDavid Hildenbrand {
14408ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
14418ad35755SDavid Hildenbrand 	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
14428ad35755SDavid Hildenbrand 	exit_sie_sync(vcpu);
14438ad35755SDavid Hildenbrand }
14448ad35755SDavid Hildenbrand 
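/*
 * Start a stopped VCPU. IBS is only kept enabled while exactly one VCPU
 * is running; starting a second VCPU therefore disables it on all VCPUs.
 */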
14456852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
14466852d7b6SDavid Hildenbrand {
14478ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
14488ad35755SDavid Hildenbrand 
14498ad35755SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
14508ad35755SDavid Hildenbrand 		return;
14518ad35755SDavid Hildenbrand 
14526852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
14538ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
14548ad35755SDavid Hildenbrand 	spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
14558ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
14568ad35755SDavid Hildenbrand 
14578ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
14588ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
14598ad35755SDavid Hildenbrand 			started_vcpus++;
14608ad35755SDavid Hildenbrand 	}
14618ad35755SDavid Hildenbrand 
14628ad35755SDavid Hildenbrand 	if (started_vcpus == 0) {
14638ad35755SDavid Hildenbrand 		/* we're the only active VCPU -> speed it up */
14648ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(vcpu);
14658ad35755SDavid Hildenbrand 	} else if (started_vcpus == 1) {
14668ad35755SDavid Hildenbrand 		/*
14678ad35755SDavid Hildenbrand 		 * As we are starting a second VCPU, we have to disable
14688ad35755SDavid Hildenbrand 		 * the IBS facility on all VCPUs to remove potentially
14698ad35755SDavid Hildenbrand 		 * outstanding ENABLE requests.
14708ad35755SDavid Hildenbrand 		 */
14718ad35755SDavid Hildenbrand 		__disable_ibs_on_all_vcpus(vcpu->kvm);
14728ad35755SDavid Hildenbrand 	}
14738ad35755SDavid Hildenbrand 
14746852d7b6SDavid Hildenbrand 	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
14758ad35755SDavid Hildenbrand 	/*
14768ad35755SDavid Hildenbrand 	 * Another VCPU might have used IBS while we were offline.
14778ad35755SDavid Hildenbrand 	 * Let's play it safe and flush the VCPU at startup.
14788ad35755SDavid Hildenbrand 	 */
14798ad35755SDavid Hildenbrand 	vcpu->arch.sie_block->ihcpu  = 0xffff;
14808ad35755SDavid Hildenbrand 	spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
14818ad35755SDavid Hildenbrand 	return;
14826852d7b6SDavid Hildenbrand }
14836852d7b6SDavid Hildenbrand 
14846852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
14856852d7b6SDavid Hildenbrand {
14868ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
14878ad35755SDavid Hildenbrand 	struct kvm_vcpu *started_vcpu = NULL;
14888ad35755SDavid Hildenbrand 
14898ad35755SDavid Hildenbrand 	if (is_vcpu_stopped(vcpu))
14908ad35755SDavid Hildenbrand 		return;
14918ad35755SDavid Hildenbrand 
14926852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
14938ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
14948ad35755SDavid Hildenbrand 	spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
14958ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
14968ad35755SDavid Hildenbrand 
1497*32f5ff63SDavid Hildenbrand 	/* Need to lock access to action_bits to avoid a SIGP race condition */
1498*32f5ff63SDavid Hildenbrand 	spin_lock_bh(&vcpu->arch.local_int.lock);
14996852d7b6SDavid Hildenbrand 	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
1500*32f5ff63SDavid Hildenbrand 
1501*32f5ff63SDavid Hildenbrand 	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
1502*32f5ff63SDavid Hildenbrand 	vcpu->arch.local_int.action_bits &=
1503*32f5ff63SDavid Hildenbrand 				 ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
1504*32f5ff63SDavid Hildenbrand 	spin_unlock_bh(&vcpu->arch.local_int.lock);
1505*32f5ff63SDavid Hildenbrand 
15068ad35755SDavid Hildenbrand 	__disable_ibs_on_vcpu(vcpu);
15078ad35755SDavid Hildenbrand 
15088ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
15098ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
15108ad35755SDavid Hildenbrand 			started_vcpus++;
15118ad35755SDavid Hildenbrand 			started_vcpu = vcpu->kvm->vcpus[i];
15128ad35755SDavid Hildenbrand 		}
15138ad35755SDavid Hildenbrand 	}
15148ad35755SDavid Hildenbrand 
15158ad35755SDavid Hildenbrand 	if (started_vcpus == 1) {
15168ad35755SDavid Hildenbrand 		/*
15178ad35755SDavid Hildenbrand 		 * As we only have one VCPU left, we want to enable the
15188ad35755SDavid Hildenbrand 		 * IBS facility for that VCPU to speed it up.
15198ad35755SDavid Hildenbrand 		 */
15208ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(started_vcpu);
15218ad35755SDavid Hildenbrand 	}
15228ad35755SDavid Hildenbrand 
15238ad35755SDavid Hildenbrand 	spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
15248ad35755SDavid Hildenbrand 	return;
15256852d7b6SDavid Hildenbrand }
15266852d7b6SDavid Hildenbrand 
1527d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1528d6712df9SCornelia Huck 				     struct kvm_enable_cap *cap)
1529d6712df9SCornelia Huck {
1530d6712df9SCornelia Huck 	int r;
1531d6712df9SCornelia Huck 
1532d6712df9SCornelia Huck 	if (cap->flags)
1533d6712df9SCornelia Huck 		return -EINVAL;
1534d6712df9SCornelia Huck 
1535d6712df9SCornelia Huck 	switch (cap->cap) {
1536fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
1537fa6b7fe9SCornelia Huck 		if (!vcpu->kvm->arch.css_support) {
1538fa6b7fe9SCornelia Huck 			vcpu->kvm->arch.css_support = 1;
1539fa6b7fe9SCornelia Huck 			trace_kvm_s390_enable_css(vcpu->kvm);
1540fa6b7fe9SCornelia Huck 		}
1541fa6b7fe9SCornelia Huck 		r = 0;
1542fa6b7fe9SCornelia Huck 		break;
1543d6712df9SCornelia Huck 	default:
1544d6712df9SCornelia Huck 		r = -EINVAL;
1545d6712df9SCornelia Huck 		break;
1546d6712df9SCornelia Huck 	}
1547d6712df9SCornelia Huck 	return r;
1548d6712df9SCornelia Huck }
1549d6712df9SCornelia Huck 
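/*
 * VCPU ioctl backend: interrupt injection, store status, initial
 * PSW/reset, one-reg access, ucontrol address space mapping, fault-in
 * and capability enablement. A minimal userspace sketch (hypothetical
 * values) for injecting an interrupt would be:
 *
 *	struct kvm_s390_interrupt irq = { .type = KVM_S390_INT_EMERGENCY };
 *	ioctl(vcpu_fd, KVM_S390_INTERRUPT, &irq);
 */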
1550b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp,
1551b0c632dbSHeiko Carstens 			 unsigned int ioctl, unsigned long arg)
1552b0c632dbSHeiko Carstens {
1553b0c632dbSHeiko Carstens 	struct kvm_vcpu *vcpu = filp->private_data;
1554b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
1555800c1065SThomas Huth 	int idx;
1556bc923cc9SAvi Kivity 	long r;
1557b0c632dbSHeiko Carstens 
155893736624SAvi Kivity 	switch (ioctl) {
155993736624SAvi Kivity 	case KVM_S390_INTERRUPT: {
1560ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
1561ba5c1e9bSCarsten Otte 
156293736624SAvi Kivity 		r = -EFAULT;
1563ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
156493736624SAvi Kivity 			break;
156593736624SAvi Kivity 		r = kvm_s390_inject_vcpu(vcpu, &s390int);
156693736624SAvi Kivity 		break;
1567ba5c1e9bSCarsten Otte 	}
1568b0c632dbSHeiko Carstens 	case KVM_S390_STORE_STATUS:
1569800c1065SThomas Huth 		idx = srcu_read_lock(&vcpu->kvm->srcu);
1570bc923cc9SAvi Kivity 		r = kvm_s390_vcpu_store_status(vcpu, arg);
1571800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
1572bc923cc9SAvi Kivity 		break;
1573b0c632dbSHeiko Carstens 	case KVM_S390_SET_INITIAL_PSW: {
1574b0c632dbSHeiko Carstens 		psw_t psw;
1575b0c632dbSHeiko Carstens 
1576bc923cc9SAvi Kivity 		r = -EFAULT;
1577b0c632dbSHeiko Carstens 		if (copy_from_user(&psw, argp, sizeof(psw)))
1578bc923cc9SAvi Kivity 			break;
1579bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
1580bc923cc9SAvi Kivity 		break;
1581b0c632dbSHeiko Carstens 	}
1582b0c632dbSHeiko Carstens 	case KVM_S390_INITIAL_RESET:
1583bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
1584bc923cc9SAvi Kivity 		break;
158514eebd91SCarsten Otte 	case KVM_SET_ONE_REG:
158614eebd91SCarsten Otte 	case KVM_GET_ONE_REG: {
158714eebd91SCarsten Otte 		struct kvm_one_reg reg;
158814eebd91SCarsten Otte 		r = -EFAULT;
158914eebd91SCarsten Otte 		if (copy_from_user(&reg, argp, sizeof(reg)))
159014eebd91SCarsten Otte 			break;
159114eebd91SCarsten Otte 		if (ioctl == KVM_SET_ONE_REG)
159214eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
159314eebd91SCarsten Otte 		else
159414eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
159514eebd91SCarsten Otte 		break;
159614eebd91SCarsten Otte 	}
159727e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
159827e0393fSCarsten Otte 	case KVM_S390_UCAS_MAP: {
159927e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
160027e0393fSCarsten Otte 
160127e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
160227e0393fSCarsten Otte 			r = -EFAULT;
160327e0393fSCarsten Otte 			break;
160427e0393fSCarsten Otte 		}
160527e0393fSCarsten Otte 
160627e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
160727e0393fSCarsten Otte 			r = -EINVAL;
160827e0393fSCarsten Otte 			break;
160927e0393fSCarsten Otte 		}
161027e0393fSCarsten Otte 
161127e0393fSCarsten Otte 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
161227e0393fSCarsten Otte 				     ucasmap.vcpu_addr, ucasmap.length);
161327e0393fSCarsten Otte 		break;
161427e0393fSCarsten Otte 	}
161527e0393fSCarsten Otte 	case KVM_S390_UCAS_UNMAP: {
161627e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
161727e0393fSCarsten Otte 
161827e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
161927e0393fSCarsten Otte 			r = -EFAULT;
162027e0393fSCarsten Otte 			break;
162127e0393fSCarsten Otte 		}
162227e0393fSCarsten Otte 
162327e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
162427e0393fSCarsten Otte 			r = -EINVAL;
162527e0393fSCarsten Otte 			break;
162627e0393fSCarsten Otte 		}
162727e0393fSCarsten Otte 
162827e0393fSCarsten Otte 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
162927e0393fSCarsten Otte 			ucasmap.length);
163027e0393fSCarsten Otte 		break;
163127e0393fSCarsten Otte 	}
163227e0393fSCarsten Otte #endif
1633ccc7910fSCarsten Otte 	case KVM_S390_VCPU_FAULT: {
1634ccc7910fSCarsten Otte 		r = gmap_fault(arg, vcpu->arch.gmap);
1635ccc7910fSCarsten Otte 		if (!IS_ERR_VALUE(r))
1636ccc7910fSCarsten Otte 			r = 0;
1637ccc7910fSCarsten Otte 		break;
1638ccc7910fSCarsten Otte 	}
1639d6712df9SCornelia Huck 	case KVM_ENABLE_CAP:
1640d6712df9SCornelia Huck 	{
1641d6712df9SCornelia Huck 		struct kvm_enable_cap cap;
1642d6712df9SCornelia Huck 		r = -EFAULT;
1643d6712df9SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
1644d6712df9SCornelia Huck 			break;
1645d6712df9SCornelia Huck 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1646d6712df9SCornelia Huck 		break;
1647d6712df9SCornelia Huck 	}
1648b0c632dbSHeiko Carstens 	default:
16493e6afcf1SCarsten Otte 		r = -ENOTTY;
1650b0c632dbSHeiko Carstens 	}
1651bc923cc9SAvi Kivity 	return r;
1652b0c632dbSHeiko Carstens }
1653b0c632dbSHeiko Carstens 
16545b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
16555b1c1493SCarsten Otte {
16565b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
16575b1c1493SCarsten Otte 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
16585b1c1493SCarsten Otte 		 && (kvm_is_ucontrol(vcpu->kvm))) {
16595b1c1493SCarsten Otte 		vmf->page = virt_to_page(vcpu->arch.sie_block);
16605b1c1493SCarsten Otte 		get_page(vmf->page);
16615b1c1493SCarsten Otte 		return 0;
16625b1c1493SCarsten Otte 	}
16635b1c1493SCarsten Otte #endif
16645b1c1493SCarsten Otte 	return VM_FAULT_SIGBUS;
16655b1c1493SCarsten Otte }
16665b1c1493SCarsten Otte 
16675587027cSAneesh Kumar K.V void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
1668db3fe4ebSTakuya Yoshikawa 			   struct kvm_memory_slot *dont)
1669db3fe4ebSTakuya Yoshikawa {
1670db3fe4ebSTakuya Yoshikawa }
1671db3fe4ebSTakuya Yoshikawa 
16725587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
16735587027cSAneesh Kumar K.V 			    unsigned long npages)
1674db3fe4ebSTakuya Yoshikawa {
1675db3fe4ebSTakuya Yoshikawa 	return 0;
1676db3fe4ebSTakuya Yoshikawa }
1677db3fe4ebSTakuya Yoshikawa 
1678e59dbe09STakuya Yoshikawa void kvm_arch_memslots_updated(struct kvm *kvm)
1679e59dbe09STakuya Yoshikawa {
1680e59dbe09STakuya Yoshikawa }
1681e59dbe09STakuya Yoshikawa 
1682b0c632dbSHeiko Carstens /* Section: memory related */
1683f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
1684f7784b8eSMarcelo Tosatti 				   struct kvm_memory_slot *memslot,
16857b6195a9STakuya Yoshikawa 				   struct kvm_userspace_memory_region *mem,
16867b6195a9STakuya Yoshikawa 				   enum kvm_mr_change change)
1687b0c632dbSHeiko Carstens {
1688dd2887e7SNick Wang 	/* A few sanity checks. Memory slots have to start and end at a segment
1689dd2887e7SNick Wang 	   boundary (1 MB). The memory in userland may be fragmented into
1690dd2887e7SNick Wang 	   various different vmas. It is okay to mmap() and munmap() stuff in
1691dd2887e7SNick Wang 	   this slot after doing this call at any time. */
1692b0c632dbSHeiko Carstens 
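	/* 0xfffff masks the low 20 bits: both the userspace address and the
	   size must be aligned to a 1 MB segment. */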
1693598841caSCarsten Otte 	if (mem->userspace_addr & 0xffffful)
1694b0c632dbSHeiko Carstens 		return -EINVAL;
1695b0c632dbSHeiko Carstens 
1696598841caSCarsten Otte 	if (mem->memory_size & 0xffffful)
1697b0c632dbSHeiko Carstens 		return -EINVAL;
1698b0c632dbSHeiko Carstens 
1699f7784b8eSMarcelo Tosatti 	return 0;
1700f7784b8eSMarcelo Tosatti }
1701f7784b8eSMarcelo Tosatti 
1702f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
1703f7784b8eSMarcelo Tosatti 				struct kvm_userspace_memory_region *mem,
17048482644aSTakuya Yoshikawa 				const struct kvm_memory_slot *old,
17058482644aSTakuya Yoshikawa 				enum kvm_mr_change change)
1706f7784b8eSMarcelo Tosatti {
1707f7850c92SCarsten Otte 	int rc;
1708f7784b8eSMarcelo Tosatti 
17092cef4debSChristian Borntraeger 	/* If the basics of the memslot do not change, we do not want
17102cef4debSChristian Borntraeger 	 * to update the gmap. Every update causes several unnecessary
17112cef4debSChristian Borntraeger 	 * segment translation exceptions. This is usually handled just
17122cef4debSChristian Borntraeger 	 * fine by the normal fault handler + gmap, but it will also
17132cef4debSChristian Borntraeger 	 * cause faults on the prefix page of running guest CPUs.
17142cef4debSChristian Borntraeger 	 */
17152cef4debSChristian Borntraeger 	if (old->userspace_addr == mem->userspace_addr &&
17162cef4debSChristian Borntraeger 	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
17172cef4debSChristian Borntraeger 	    old->npages * PAGE_SIZE == mem->memory_size)
17182cef4debSChristian Borntraeger 		return;
1719598841caSCarsten Otte 
1720598841caSCarsten Otte 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
1721598841caSCarsten Otte 		mem->guest_phys_addr, mem->memory_size);
1722598841caSCarsten Otte 	if (rc)
1723f7850c92SCarsten Otte 		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
1724598841caSCarsten Otte 	return;
1725b0c632dbSHeiko Carstens }
1726b0c632dbSHeiko Carstens 
17272df72e9bSMarcelo Tosatti void kvm_arch_flush_shadow_all(struct kvm *kvm)
17282df72e9bSMarcelo Tosatti {
17292df72e9bSMarcelo Tosatti }
17302df72e9bSMarcelo Tosatti 
17312df72e9bSMarcelo Tosatti void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
17322df72e9bSMarcelo Tosatti 				   struct kvm_memory_slot *slot)
173334d4cb8fSMarcelo Tosatti {
173434d4cb8fSMarcelo Tosatti }
173534d4cb8fSMarcelo Tosatti 
1736b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
1737b0c632dbSHeiko Carstens {
1738ef50f7acSChristian Borntraeger 	int ret;
17390ee75beaSAvi Kivity 	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1740ef50f7acSChristian Borntraeger 	if (ret)
1741ef50f7acSChristian Borntraeger 		return ret;
1742ef50f7acSChristian Borntraeger 
1743ef50f7acSChristian Borntraeger 	/*
1744ef50f7acSChristian Borntraeger 	 * guests can ask for up to 255+1 double words, so we need a full page
174525985edcSLucas De Marchi 	 * to hold the maximum amount of facilities. On the other hand, we
1746ef50f7acSChristian Borntraeger 	 * only set facilities that are known to work in KVM.
1747ef50f7acSChristian Borntraeger 	 */
174878c4b59fSMichael Mueller 	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
174978c4b59fSMichael Mueller 	if (!vfacilities) {
1750ef50f7acSChristian Borntraeger 		kvm_exit();
1751ef50f7acSChristian Borntraeger 		return -ENOMEM;
1752ef50f7acSChristian Borntraeger 	}
175378c4b59fSMichael Mueller 	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
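	/*
	 * Restrict the facility bits reported to the guest to those known
	 * to work under KVM (the masks below encode that whitelist).
	 */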
1754d208c79dSThomas Huth 	vfacilities[0] &= 0xff82fff3f4fc2000UL;
17557feb6bb8SMichael Mueller 	vfacilities[1] &= 0x005c000000000000UL;
1756ef50f7acSChristian Borntraeger 	return 0;
1757b0c632dbSHeiko Carstens }
1758b0c632dbSHeiko Carstens 
1759b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void)
1760b0c632dbSHeiko Carstens {
176178c4b59fSMichael Mueller 	free_page((unsigned long) vfacilities);
1762b0c632dbSHeiko Carstens 	kvm_exit();
1763b0c632dbSHeiko Carstens }
1764b0c632dbSHeiko Carstens 
1765b0c632dbSHeiko Carstens module_init(kvm_s390_init);
1766b0c632dbSHeiko Carstens module_exit(kvm_s390_exit);
1767566af940SCornelia Huck 
1768566af940SCornelia Huck /*
1769566af940SCornelia Huck  * Enable autoloading of the kvm module.
1770566af940SCornelia Huck  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
1771566af940SCornelia Huck  * since x86 takes a different approach.
1772566af940SCornelia Huck  */
1773566af940SCornelia Huck #include <linux/miscdevice.h>
1774566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR);
1775566af940SCornelia Huck MODULE_ALIAS("devname:kvm");
1776