xref: /openbmc/linux/arch/s390/kvm/kvm-s390.c (revision d3d692c82e4ed79ae7c85f8825ccfdb7d11819da)
1b0c632dbSHeiko Carstens /*
2a53c8fabSHeiko Carstens  * hosting zSeries kernel virtual machines
3b0c632dbSHeiko Carstens  *
4628eb9b8SChristian Ehrhardt  * Copyright IBM Corp. 2008, 2009
5b0c632dbSHeiko Carstens  *
6b0c632dbSHeiko Carstens  * This program is free software; you can redistribute it and/or modify
7b0c632dbSHeiko Carstens  * it under the terms of the GNU General Public License (version 2 only)
8b0c632dbSHeiko Carstens  * as published by the Free Software Foundation.
9b0c632dbSHeiko Carstens  *
10b0c632dbSHeiko Carstens  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11b0c632dbSHeiko Carstens  *               Christian Borntraeger <borntraeger@de.ibm.com>
12b0c632dbSHeiko Carstens  *               Heiko Carstens <heiko.carstens@de.ibm.com>
13628eb9b8SChristian Ehrhardt  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
1415f36ebdSJason J. Herne  *               Jason J. Herne <jjherne@us.ibm.com>
15b0c632dbSHeiko Carstens  */
16b0c632dbSHeiko Carstens 
17b0c632dbSHeiko Carstens #include <linux/compiler.h>
18b0c632dbSHeiko Carstens #include <linux/err.h>
19b0c632dbSHeiko Carstens #include <linux/fs.h>
20ca872302SChristian Borntraeger #include <linux/hrtimer.h>
21b0c632dbSHeiko Carstens #include <linux/init.h>
22b0c632dbSHeiko Carstens #include <linux/kvm.h>
23b0c632dbSHeiko Carstens #include <linux/kvm_host.h>
24b0c632dbSHeiko Carstens #include <linux/module.h>
25b0c632dbSHeiko Carstens #include <linux/slab.h>
26ba5c1e9bSCarsten Otte #include <linux/timer.h>
27cbb870c8SHeiko Carstens #include <asm/asm-offsets.h>
28b0c632dbSHeiko Carstens #include <asm/lowcore.h>
29b0c632dbSHeiko Carstens #include <asm/pgtable.h>
30f5daba1dSHeiko Carstens #include <asm/nmi.h>
31a0616cdeSDavid Howells #include <asm/switch_to.h>
3278c4b59fSMichael Mueller #include <asm/facility.h>
331526bf9cSChristian Borntraeger #include <asm/sclp.h>
348f2abe6aSChristian Borntraeger #include "kvm-s390.h"
35b0c632dbSHeiko Carstens #include "gaccess.h"
36b0c632dbSHeiko Carstens 
375786fffaSCornelia Huck #define CREATE_TRACE_POINTS
385786fffaSCornelia Huck #include "trace.h"
39ade38c31SCornelia Huck #include "trace-s390.h"
405786fffaSCornelia Huck 
41b0c632dbSHeiko Carstens #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
42b0c632dbSHeiko Carstens 
43b0c632dbSHeiko Carstens struct kvm_stats_debugfs_item debugfs_entries[] = {
44b0c632dbSHeiko Carstens 	{ "userspace_handled", VCPU_STAT(exit_userspace) },
450eaeafa1SChristian Borntraeger 	{ "exit_null", VCPU_STAT(exit_null) },
468f2abe6aSChristian Borntraeger 	{ "exit_validity", VCPU_STAT(exit_validity) },
478f2abe6aSChristian Borntraeger 	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
488f2abe6aSChristian Borntraeger 	{ "exit_external_request", VCPU_STAT(exit_external_request) },
498f2abe6aSChristian Borntraeger 	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
50ba5c1e9bSCarsten Otte 	{ "exit_instruction", VCPU_STAT(exit_instruction) },
51ba5c1e9bSCarsten Otte 	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
52ba5c1e9bSCarsten Otte 	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
53f5e10b09SChristian Borntraeger 	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
54ba5c1e9bSCarsten Otte 	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
55aba07508SDavid Hildenbrand 	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
56aba07508SDavid Hildenbrand 	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
57ba5c1e9bSCarsten Otte 	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
587697e71fSChristian Ehrhardt 	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
59ba5c1e9bSCarsten Otte 	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
60ba5c1e9bSCarsten Otte 	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
61ba5c1e9bSCarsten Otte 	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
62ba5c1e9bSCarsten Otte 	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
63ba5c1e9bSCarsten Otte 	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
64ba5c1e9bSCarsten Otte 	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
65ba5c1e9bSCarsten Otte 	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
6669d0d3a3SChristian Borntraeger 	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
67453423dcSChristian Borntraeger 	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
68453423dcSChristian Borntraeger 	{ "instruction_spx", VCPU_STAT(instruction_spx) },
69453423dcSChristian Borntraeger 	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
70453423dcSChristian Borntraeger 	{ "instruction_stap", VCPU_STAT(instruction_stap) },
71453423dcSChristian Borntraeger 	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
728a242234SHeiko Carstens 	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
73453423dcSChristian Borntraeger 	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
74453423dcSChristian Borntraeger 	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
75b31288faSKonstantin Weitz 	{ "instruction_essa", VCPU_STAT(instruction_essa) },
76453423dcSChristian Borntraeger 	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
77453423dcSChristian Borntraeger 	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
78bb25b9baSChristian Borntraeger 	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
795288fbf0SChristian Borntraeger 	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
80bd59d3a4SCornelia Huck 	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
817697e71fSChristian Ehrhardt 	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
825288fbf0SChristian Borntraeger 	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
835288fbf0SChristian Borntraeger 	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
845288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
855288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
865288fbf0SChristian Borntraeger 	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
87388186bcSChristian Borntraeger 	{ "diagnose_10", VCPU_STAT(diagnose_10) },
88e28acfeaSChristian Borntraeger 	{ "diagnose_44", VCPU_STAT(diagnose_44) },
8941628d33SKonstantin Weitz 	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
90b0c632dbSHeiko Carstens 	{ NULL }
91b0c632dbSHeiko Carstens };
92b0c632dbSHeiko Carstens 
9378c4b59fSMichael Mueller unsigned long *vfacilities;
942c70fe44SChristian Borntraeger static struct gmap_notifier gmap_notifier;
95b0c632dbSHeiko Carstens 
9678c4b59fSMichael Mueller /* test availability of vfacility */
97280ef0f1SHeiko Carstens int test_vfacility(unsigned long nr)
9878c4b59fSMichael Mueller {
9978c4b59fSMichael Mueller 	return __test_facility(nr, (void *) vfacilities);
10078c4b59fSMichael Mueller }
10178c4b59fSMichael Mueller 
102b0c632dbSHeiko Carstens /* Section: not file related */
10310474ae8SAlexander Graf int kvm_arch_hardware_enable(void *garbage)
104b0c632dbSHeiko Carstens {
105b0c632dbSHeiko Carstens 	/* every s390 is virtualization enabled ;-) */
10610474ae8SAlexander Graf 	return 0;
107b0c632dbSHeiko Carstens }
108b0c632dbSHeiko Carstens 
109b0c632dbSHeiko Carstens void kvm_arch_hardware_disable(void *garbage)
110b0c632dbSHeiko Carstens {
111b0c632dbSHeiko Carstens }
112b0c632dbSHeiko Carstens 
1132c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
1142c70fe44SChristian Borntraeger 
115b0c632dbSHeiko Carstens int kvm_arch_hardware_setup(void)
116b0c632dbSHeiko Carstens {
1172c70fe44SChristian Borntraeger 	gmap_notifier.notifier_call = kvm_gmap_notifier;
1182c70fe44SChristian Borntraeger 	gmap_register_ipte_notifier(&gmap_notifier);
119b0c632dbSHeiko Carstens 	return 0;
120b0c632dbSHeiko Carstens }
121b0c632dbSHeiko Carstens 
122b0c632dbSHeiko Carstens void kvm_arch_hardware_unsetup(void)
123b0c632dbSHeiko Carstens {
1242c70fe44SChristian Borntraeger 	gmap_unregister_ipte_notifier(&gmap_notifier);
125b0c632dbSHeiko Carstens }
126b0c632dbSHeiko Carstens 
127b0c632dbSHeiko Carstens void kvm_arch_check_processor_compat(void *rtn)
128b0c632dbSHeiko Carstens {
129b0c632dbSHeiko Carstens }
130b0c632dbSHeiko Carstens 
131b0c632dbSHeiko Carstens int kvm_arch_init(void *opaque)
132b0c632dbSHeiko Carstens {
133b0c632dbSHeiko Carstens 	return 0;
134b0c632dbSHeiko Carstens }
135b0c632dbSHeiko Carstens 
136b0c632dbSHeiko Carstens void kvm_arch_exit(void)
137b0c632dbSHeiko Carstens {
138b0c632dbSHeiko Carstens }
139b0c632dbSHeiko Carstens 
140b0c632dbSHeiko Carstens /* Section: device related */
141b0c632dbSHeiko Carstens long kvm_arch_dev_ioctl(struct file *filp,
142b0c632dbSHeiko Carstens 			unsigned int ioctl, unsigned long arg)
143b0c632dbSHeiko Carstens {
144b0c632dbSHeiko Carstens 	if (ioctl == KVM_S390_ENABLE_SIE)
145b0c632dbSHeiko Carstens 		return s390_enable_sie();
146b0c632dbSHeiko Carstens 	return -EINVAL;
147b0c632dbSHeiko Carstens }
148b0c632dbSHeiko Carstens 
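/* Report which optional KVM capabilities are available to user space on s390. */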
149784aa3d7SAlexander Graf int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
150b0c632dbSHeiko Carstens {
151d7b0b5ebSCarsten Otte 	int r;
152d7b0b5ebSCarsten Otte 
1532bd0ac4eSCarsten Otte 	switch (ext) {
154d7b0b5ebSCarsten Otte 	case KVM_CAP_S390_PSW:
155b6cf8788SChristian Borntraeger 	case KVM_CAP_S390_GMAP:
15652e16b18SChristian Borntraeger 	case KVM_CAP_SYNC_MMU:
1571efd0f59SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
1581efd0f59SCarsten Otte 	case KVM_CAP_S390_UCONTROL:
1591efd0f59SCarsten Otte #endif
1603c038e6bSDominik Dingel 	case KVM_CAP_ASYNC_PF:
16160b413c9SChristian Borntraeger 	case KVM_CAP_SYNC_REGS:
16214eebd91SCarsten Otte 	case KVM_CAP_ONE_REG:
163d6712df9SCornelia Huck 	case KVM_CAP_ENABLE_CAP:
164fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
165ebc32262SCornelia Huck 	case KVM_CAP_IRQFD:
16610ccaa1eSCornelia Huck 	case KVM_CAP_IOEVENTFD:
167c05c4186SJens Freimann 	case KVM_CAP_DEVICE_CTRL:
168d938dc55SCornelia Huck 	case KVM_CAP_ENABLE_CAP_VM:
16978599d90SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
170f2061656SDominik Dingel 	case KVM_CAP_VM_ATTRIBUTES:
1716352e4d2SDavid Hildenbrand 	case KVM_CAP_MP_STATE:
172d7b0b5ebSCarsten Otte 		r = 1;
173d7b0b5ebSCarsten Otte 		break;
174e726b1bdSChristian Borntraeger 	case KVM_CAP_NR_VCPUS:
175e726b1bdSChristian Borntraeger 	case KVM_CAP_MAX_VCPUS:
176e726b1bdSChristian Borntraeger 		r = KVM_MAX_VCPUS;
177e726b1bdSChristian Borntraeger 		break;
178e1e2e605SNick Wang 	case KVM_CAP_NR_MEMSLOTS:
179e1e2e605SNick Wang 		r = KVM_USER_MEM_SLOTS;
180e1e2e605SNick Wang 		break;
1811526bf9cSChristian Borntraeger 	case KVM_CAP_S390_COW:
182abf09bedSMartin Schwidefsky 		r = MACHINE_HAS_ESOP;
1831526bf9cSChristian Borntraeger 		break;
1842bd0ac4eSCarsten Otte 	default:
185d7b0b5ebSCarsten Otte 		r = 0;
186b0c632dbSHeiko Carstens 	}
187d7b0b5ebSCarsten Otte 	return r;
1882bd0ac4eSCarsten Otte }
189b0c632dbSHeiko Carstens 
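/*
 * Walk every guest frame of the memory slot and transfer the per-page
 * dirty state from the gmap into the KVM dirty bitmap.
 */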
19015f36ebdSJason J. Herne static void kvm_s390_sync_dirty_log(struct kvm *kvm,
19115f36ebdSJason J. Herne 					struct kvm_memory_slot *memslot)
19215f36ebdSJason J. Herne {
19315f36ebdSJason J. Herne 	gfn_t cur_gfn, last_gfn;
19415f36ebdSJason J. Herne 	unsigned long address;
19515f36ebdSJason J. Herne 	struct gmap *gmap = kvm->arch.gmap;
19615f36ebdSJason J. Herne 
19715f36ebdSJason J. Herne 	down_read(&gmap->mm->mmap_sem);
19815f36ebdSJason J. Herne 	/* Loop over all guest pages */
19915f36ebdSJason J. Herne 	last_gfn = memslot->base_gfn + memslot->npages;
20015f36ebdSJason J. Herne 	for (cur_gfn = memslot->base_gfn; cur_gfn < last_gfn; cur_gfn++) {
20115f36ebdSJason J. Herne 		address = gfn_to_hva_memslot(memslot, cur_gfn);
20215f36ebdSJason J. Herne 
20315f36ebdSJason J. Herne 		if (gmap_test_and_clear_dirty(address, gmap))
20415f36ebdSJason J. Herne 			mark_page_dirty(kvm, cur_gfn);
20515f36ebdSJason J. Herne 	}
20615f36ebdSJason J. Herne 	up_read(&gmap->mm->mmap_sem);
20715f36ebdSJason J. Herne }
20815f36ebdSJason J. Herne 
209b0c632dbSHeiko Carstens /* Section: vm related */
210b0c632dbSHeiko Carstens /*
211b0c632dbSHeiko Carstens  * Get (and clear) the dirty memory log for a memory slot.
212b0c632dbSHeiko Carstens  */
213b0c632dbSHeiko Carstens int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
214b0c632dbSHeiko Carstens 			       struct kvm_dirty_log *log)
215b0c632dbSHeiko Carstens {
21615f36ebdSJason J. Herne 	int r;
21715f36ebdSJason J. Herne 	unsigned long n;
21815f36ebdSJason J. Herne 	struct kvm_memory_slot *memslot;
21915f36ebdSJason J. Herne 	int is_dirty = 0;
22015f36ebdSJason J. Herne 
22115f36ebdSJason J. Herne 	mutex_lock(&kvm->slots_lock);
22215f36ebdSJason J. Herne 
22315f36ebdSJason J. Herne 	r = -EINVAL;
22415f36ebdSJason J. Herne 	if (log->slot >= KVM_USER_MEM_SLOTS)
22515f36ebdSJason J. Herne 		goto out;
22615f36ebdSJason J. Herne 
22715f36ebdSJason J. Herne 	memslot = id_to_memslot(kvm->memslots, log->slot);
22815f36ebdSJason J. Herne 	r = -ENOENT;
22915f36ebdSJason J. Herne 	if (!memslot->dirty_bitmap)
23015f36ebdSJason J. Herne 		goto out;
23115f36ebdSJason J. Herne 
23215f36ebdSJason J. Herne 	kvm_s390_sync_dirty_log(kvm, memslot);
23315f36ebdSJason J. Herne 	r = kvm_get_dirty_log(kvm, log, &is_dirty);
23415f36ebdSJason J. Herne 	if (r)
23515f36ebdSJason J. Herne 		goto out;
23615f36ebdSJason J. Herne 
23715f36ebdSJason J. Herne 	/* Clear the dirty log */
23815f36ebdSJason J. Herne 	if (is_dirty) {
23915f36ebdSJason J. Herne 		n = kvm_dirty_bitmap_bytes(memslot);
24015f36ebdSJason J. Herne 		memset(memslot->dirty_bitmap, 0, n);
24115f36ebdSJason J. Herne 	}
24215f36ebdSJason J. Herne 	r = 0;
24315f36ebdSJason J. Herne out:
24415f36ebdSJason J. Herne 	mutex_unlock(&kvm->slots_lock);
24515f36ebdSJason J. Herne 	return r;
246b0c632dbSHeiko Carstens }
247b0c632dbSHeiko Carstens 
248d938dc55SCornelia Huck static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
249d938dc55SCornelia Huck {
250d938dc55SCornelia Huck 	int r;
251d938dc55SCornelia Huck 
252d938dc55SCornelia Huck 	if (cap->flags)
253d938dc55SCornelia Huck 		return -EINVAL;
254d938dc55SCornelia Huck 
255d938dc55SCornelia Huck 	switch (cap->cap) {
25684223598SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
25784223598SCornelia Huck 		kvm->arch.use_irqchip = 1;
25884223598SCornelia Huck 		r = 0;
25984223598SCornelia Huck 		break;
260d938dc55SCornelia Huck 	default:
261d938dc55SCornelia Huck 		r = -EINVAL;
262d938dc55SCornelia Huck 		break;
263d938dc55SCornelia Huck 	}
264d938dc55SCornelia Huck 	return r;
265d938dc55SCornelia Huck }
266d938dc55SCornelia Huck 
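/*
 * Handle the KVM_S390_VM_MEM_CTRL attribute group: enable CMMA (only
 * while no vcpus exist yet) or reset the guest's CMMA page states.
 */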
2674f718eabSDominik Dingel static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
2684f718eabSDominik Dingel {
2694f718eabSDominik Dingel 	int ret;
2704f718eabSDominik Dingel 	unsigned int idx;
2714f718eabSDominik Dingel 	switch (attr->attr) {
2724f718eabSDominik Dingel 	case KVM_S390_VM_MEM_ENABLE_CMMA:
2734f718eabSDominik Dingel 		ret = -EBUSY;
2744f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
2754f718eabSDominik Dingel 		if (atomic_read(&kvm->online_vcpus) == 0) {
2764f718eabSDominik Dingel 			kvm->arch.use_cmma = 1;
2774f718eabSDominik Dingel 			ret = 0;
2784f718eabSDominik Dingel 		}
2794f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
2804f718eabSDominik Dingel 		break;
2814f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CLR_CMMA:
2824f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
2834f718eabSDominik Dingel 		idx = srcu_read_lock(&kvm->srcu);
2844f718eabSDominik Dingel 		page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
2854f718eabSDominik Dingel 		srcu_read_unlock(&kvm->srcu, idx);
2864f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
2874f718eabSDominik Dingel 		ret = 0;
2884f718eabSDominik Dingel 		break;
2894f718eabSDominik Dingel 	default:
2904f718eabSDominik Dingel 		ret = -ENXIO;
2914f718eabSDominik Dingel 		break;
2924f718eabSDominik Dingel 	}
2934f718eabSDominik Dingel 	return ret;
2944f718eabSDominik Dingel }
2954f718eabSDominik Dingel 
296f2061656SDominik Dingel static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
297f2061656SDominik Dingel {
298f2061656SDominik Dingel 	int ret;
299f2061656SDominik Dingel 
300f2061656SDominik Dingel 	switch (attr->group) {
3014f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
3024f718eabSDominik Dingel 		ret = kvm_s390_mem_control(kvm, attr);
3034f718eabSDominik Dingel 		break;
304f2061656SDominik Dingel 	default:
305f2061656SDominik Dingel 		ret = -ENXIO;
306f2061656SDominik Dingel 		break;
307f2061656SDominik Dingel 	}
308f2061656SDominik Dingel 
309f2061656SDominik Dingel 	return ret;
310f2061656SDominik Dingel }
311f2061656SDominik Dingel 
312f2061656SDominik Dingel static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
313f2061656SDominik Dingel {
314f2061656SDominik Dingel 	return -ENXIO;
315f2061656SDominik Dingel }
316f2061656SDominik Dingel 
317f2061656SDominik Dingel static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
318f2061656SDominik Dingel {
319f2061656SDominik Dingel 	int ret;
320f2061656SDominik Dingel 
321f2061656SDominik Dingel 	switch (attr->group) {
3224f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
3234f718eabSDominik Dingel 		switch (attr->attr) {
3244f718eabSDominik Dingel 		case KVM_S390_VM_MEM_ENABLE_CMMA:
3254f718eabSDominik Dingel 		case KVM_S390_VM_MEM_CLR_CMMA:
3264f718eabSDominik Dingel 			ret = 0;
3274f718eabSDominik Dingel 			break;
3284f718eabSDominik Dingel 		default:
3294f718eabSDominik Dingel 			ret = -ENXIO;
3304f718eabSDominik Dingel 			break;
3314f718eabSDominik Dingel 		}
3324f718eabSDominik Dingel 		break;
333f2061656SDominik Dingel 	default:
334f2061656SDominik Dingel 		ret = -ENXIO;
335f2061656SDominik Dingel 		break;
336f2061656SDominik Dingel 	}
337f2061656SDominik Dingel 
338f2061656SDominik Dingel 	return ret;
339f2061656SDominik Dingel }
340f2061656SDominik Dingel 
341b0c632dbSHeiko Carstens long kvm_arch_vm_ioctl(struct file *filp,
342b0c632dbSHeiko Carstens 		       unsigned int ioctl, unsigned long arg)
343b0c632dbSHeiko Carstens {
344b0c632dbSHeiko Carstens 	struct kvm *kvm = filp->private_data;
345b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
346f2061656SDominik Dingel 	struct kvm_device_attr attr;
347b0c632dbSHeiko Carstens 	int r;
348b0c632dbSHeiko Carstens 
349b0c632dbSHeiko Carstens 	switch (ioctl) {
350ba5c1e9bSCarsten Otte 	case KVM_S390_INTERRUPT: {
351ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
352ba5c1e9bSCarsten Otte 
353ba5c1e9bSCarsten Otte 		r = -EFAULT;
354ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
355ba5c1e9bSCarsten Otte 			break;
356ba5c1e9bSCarsten Otte 		r = kvm_s390_inject_vm(kvm, &s390int);
357ba5c1e9bSCarsten Otte 		break;
358ba5c1e9bSCarsten Otte 	}
359d938dc55SCornelia Huck 	case KVM_ENABLE_CAP: {
360d938dc55SCornelia Huck 		struct kvm_enable_cap cap;
361d938dc55SCornelia Huck 		r = -EFAULT;
362d938dc55SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
363d938dc55SCornelia Huck 			break;
364d938dc55SCornelia Huck 		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
365d938dc55SCornelia Huck 		break;
366d938dc55SCornelia Huck 	}
36784223598SCornelia Huck 	case KVM_CREATE_IRQCHIP: {
36884223598SCornelia Huck 		struct kvm_irq_routing_entry routing;
36984223598SCornelia Huck 
37084223598SCornelia Huck 		r = -EINVAL;
37184223598SCornelia Huck 		if (kvm->arch.use_irqchip) {
37284223598SCornelia Huck 			/* Set up dummy routing. */
37384223598SCornelia Huck 			memset(&routing, 0, sizeof(routing));
37484223598SCornelia Huck 			kvm_set_irq_routing(kvm, &routing, 0, 0);
37584223598SCornelia Huck 			r = 0;
37684223598SCornelia Huck 		}
37784223598SCornelia Huck 		break;
37884223598SCornelia Huck 	}
379f2061656SDominik Dingel 	case KVM_SET_DEVICE_ATTR: {
380f2061656SDominik Dingel 		r = -EFAULT;
381f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
382f2061656SDominik Dingel 			break;
383f2061656SDominik Dingel 		r = kvm_s390_vm_set_attr(kvm, &attr);
384f2061656SDominik Dingel 		break;
385f2061656SDominik Dingel 	}
386f2061656SDominik Dingel 	case KVM_GET_DEVICE_ATTR: {
387f2061656SDominik Dingel 		r = -EFAULT;
388f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
389f2061656SDominik Dingel 			break;
390f2061656SDominik Dingel 		r = kvm_s390_vm_get_attr(kvm, &attr);
391f2061656SDominik Dingel 		break;
392f2061656SDominik Dingel 	}
393f2061656SDominik Dingel 	case KVM_HAS_DEVICE_ATTR: {
394f2061656SDominik Dingel 		r = -EFAULT;
395f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
396f2061656SDominik Dingel 			break;
397f2061656SDominik Dingel 		r = kvm_s390_vm_has_attr(kvm, &attr);
398f2061656SDominik Dingel 		break;
399f2061656SDominik Dingel 	}
400b0c632dbSHeiko Carstens 	default:
401367e1319SAvi Kivity 		r = -ENOTTY;
402b0c632dbSHeiko Carstens 	}
403b0c632dbSHeiko Carstens 
404b0c632dbSHeiko Carstens 	return r;
405b0c632dbSHeiko Carstens }
406b0c632dbSHeiko Carstens 
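/*
 * Create the architecture specific parts of a VM: the SCA, the debug
 * feature, the floating interrupt list and, unless this is a ucontrol
 * VM, the guest address space (gmap).
 */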
407e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
408b0c632dbSHeiko Carstens {
409b0c632dbSHeiko Carstens 	int rc;
410b0c632dbSHeiko Carstens 	char debug_name[16];
411f6c137ffSChristian Borntraeger 	static unsigned long sca_offset;
412b0c632dbSHeiko Carstens 
413e08b9637SCarsten Otte 	rc = -EINVAL;
414e08b9637SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
415e08b9637SCarsten Otte 	if (type & ~KVM_VM_S390_UCONTROL)
416e08b9637SCarsten Otte 		goto out_err;
417e08b9637SCarsten Otte 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
418e08b9637SCarsten Otte 		goto out_err;
419e08b9637SCarsten Otte #else
420e08b9637SCarsten Otte 	if (type)
421e08b9637SCarsten Otte 		goto out_err;
422e08b9637SCarsten Otte #endif
423e08b9637SCarsten Otte 
424b0c632dbSHeiko Carstens 	rc = s390_enable_sie();
425b0c632dbSHeiko Carstens 	if (rc)
426d89f5effSJan Kiszka 		goto out_err;
427b0c632dbSHeiko Carstens 
428b290411aSCarsten Otte 	rc = -ENOMEM;
429b290411aSCarsten Otte 
430b0c632dbSHeiko Carstens 	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
431b0c632dbSHeiko Carstens 	if (!kvm->arch.sca)
432d89f5effSJan Kiszka 		goto out_err;
433f6c137ffSChristian Borntraeger 	spin_lock(&kvm_lock);
434f6c137ffSChristian Borntraeger 	sca_offset = (sca_offset + 16) & 0x7f0;
435f6c137ffSChristian Borntraeger 	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
436f6c137ffSChristian Borntraeger 	spin_unlock(&kvm_lock);
437b0c632dbSHeiko Carstens 
438b0c632dbSHeiko Carstens 	sprintf(debug_name, "kvm-%u", current->pid);
439b0c632dbSHeiko Carstens 
440b0c632dbSHeiko Carstens 	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
441b0c632dbSHeiko Carstens 	if (!kvm->arch.dbf)
442b0c632dbSHeiko Carstens 		goto out_nodbf;
443b0c632dbSHeiko Carstens 
444ba5c1e9bSCarsten Otte 	spin_lock_init(&kvm->arch.float_int.lock);
445ba5c1e9bSCarsten Otte 	INIT_LIST_HEAD(&kvm->arch.float_int.list);
4468a242234SHeiko Carstens 	init_waitqueue_head(&kvm->arch.ipte_wq);
447ba5c1e9bSCarsten Otte 
448b0c632dbSHeiko Carstens 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
449b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "%s", "vm created");
450b0c632dbSHeiko Carstens 
451e08b9637SCarsten Otte 	if (type & KVM_VM_S390_UCONTROL) {
452e08b9637SCarsten Otte 		kvm->arch.gmap = NULL;
453e08b9637SCarsten Otte 	} else {
454598841caSCarsten Otte 		kvm->arch.gmap = gmap_alloc(current->mm);
455598841caSCarsten Otte 		if (!kvm->arch.gmap)
456598841caSCarsten Otte 			goto out_nogmap;
4572c70fe44SChristian Borntraeger 		kvm->arch.gmap->private = kvm;
45824eb3a82SDominik Dingel 		kvm->arch.gmap->pfault_enabled = 0;
459e08b9637SCarsten Otte 	}
460fa6b7fe9SCornelia Huck 
461fa6b7fe9SCornelia Huck 	kvm->arch.css_support = 0;
46284223598SCornelia Huck 	kvm->arch.use_irqchip = 0;
463fa6b7fe9SCornelia Huck 
4648ad35755SDavid Hildenbrand 	spin_lock_init(&kvm->arch.start_stop_lock);
4658ad35755SDavid Hildenbrand 
466d89f5effSJan Kiszka 	return 0;
467598841caSCarsten Otte out_nogmap:
468598841caSCarsten Otte 	debug_unregister(kvm->arch.dbf);
469b0c632dbSHeiko Carstens out_nodbf:
470b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
471d89f5effSJan Kiszka out_err:
472d89f5effSJan Kiszka 	return rc;
473b0c632dbSHeiko Carstens }
474b0c632dbSHeiko Carstens 
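/* Free a vcpu: detach it from the SCA and release its CMMA buffer and SIE control block. */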
475d329c035SChristian Borntraeger void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
476d329c035SChristian Borntraeger {
477d329c035SChristian Borntraeger 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
478ade38c31SCornelia Huck 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
47967335e63SChristian Borntraeger 	kvm_s390_clear_local_irqs(vcpu);
4803c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
48158f9460bSCarsten Otte 	if (!kvm_is_ucontrol(vcpu->kvm)) {
48258f9460bSCarsten Otte 		clear_bit(63 - vcpu->vcpu_id,
48358f9460bSCarsten Otte 			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
484abf4a71eSCarsten Otte 		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
485abf4a71eSCarsten Otte 		    (__u64) vcpu->arch.sie_block)
486abf4a71eSCarsten Otte 			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
48758f9460bSCarsten Otte 	}
488abf4a71eSCarsten Otte 	smp_mb();
48927e0393fSCarsten Otte 
49027e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm))
49127e0393fSCarsten Otte 		gmap_free(vcpu->arch.gmap);
49227e0393fSCarsten Otte 
493b31605c1SDominik Dingel 	if (kvm_s390_cmma_enabled(vcpu->kvm))
494b31605c1SDominik Dingel 		kvm_s390_vcpu_unsetup_cmma(vcpu);
495d329c035SChristian Borntraeger 	free_page((unsigned long)(vcpu->arch.sie_block));
496b31288faSKonstantin Weitz 
4976692cef3SChristian Borntraeger 	kvm_vcpu_uninit(vcpu);
498b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
499d329c035SChristian Borntraeger }
500d329c035SChristian Borntraeger 
501d329c035SChristian Borntraeger static void kvm_free_vcpus(struct kvm *kvm)
502d329c035SChristian Borntraeger {
503d329c035SChristian Borntraeger 	unsigned int i;
504988a2caeSGleb Natapov 	struct kvm_vcpu *vcpu;
505d329c035SChristian Borntraeger 
506988a2caeSGleb Natapov 	kvm_for_each_vcpu(i, vcpu, kvm)
507988a2caeSGleb Natapov 		kvm_arch_vcpu_destroy(vcpu);
508988a2caeSGleb Natapov 
509988a2caeSGleb Natapov 	mutex_lock(&kvm->lock);
510988a2caeSGleb Natapov 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
511d329c035SChristian Borntraeger 		kvm->vcpus[i] = NULL;
512988a2caeSGleb Natapov 
513988a2caeSGleb Natapov 	atomic_set(&kvm->online_vcpus, 0);
514988a2caeSGleb Natapov 	mutex_unlock(&kvm->lock);
515d329c035SChristian Borntraeger }
516d329c035SChristian Borntraeger 
517ad8ba2cdSSheng Yang void kvm_arch_sync_events(struct kvm *kvm)
518ad8ba2cdSSheng Yang {
519ad8ba2cdSSheng Yang }
520ad8ba2cdSSheng Yang 
521b0c632dbSHeiko Carstens void kvm_arch_destroy_vm(struct kvm *kvm)
522b0c632dbSHeiko Carstens {
523d329c035SChristian Borntraeger 	kvm_free_vcpus(kvm);
524b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
525d329c035SChristian Borntraeger 	debug_unregister(kvm->arch.dbf);
52627e0393fSCarsten Otte 	if (!kvm_is_ucontrol(kvm))
527598841caSCarsten Otte 		gmap_free(kvm->arch.gmap);
528841b91c5SCornelia Huck 	kvm_s390_destroy_adapters(kvm);
52967335e63SChristian Borntraeger 	kvm_s390_clear_float_irqs(kvm);
530b0c632dbSHeiko Carstens }
531b0c632dbSHeiko Carstens 
532b0c632dbSHeiko Carstens /* Section: vcpu related */
533b0c632dbSHeiko Carstens int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
534b0c632dbSHeiko Carstens {
5353c038e6bSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
5363c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
53727e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm)) {
53827e0393fSCarsten Otte 		vcpu->arch.gmap = gmap_alloc(current->mm);
53927e0393fSCarsten Otte 		if (!vcpu->arch.gmap)
54027e0393fSCarsten Otte 			return -ENOMEM;
5412c70fe44SChristian Borntraeger 		vcpu->arch.gmap->private = vcpu->kvm;
54227e0393fSCarsten Otte 		return 0;
54327e0393fSCarsten Otte 	}
54427e0393fSCarsten Otte 
545598841caSCarsten Otte 	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
54659674c1aSChristian Borntraeger 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
54759674c1aSChristian Borntraeger 				    KVM_SYNC_GPRS |
5489eed0735SChristian Borntraeger 				    KVM_SYNC_ACRS |
549b028ee3eSDavid Hildenbrand 				    KVM_SYNC_CRS |
550b028ee3eSDavid Hildenbrand 				    KVM_SYNC_ARCH0 |
551b028ee3eSDavid Hildenbrand 				    KVM_SYNC_PFAULT;
552b0c632dbSHeiko Carstens 	return 0;
553b0c632dbSHeiko Carstens }
554b0c632dbSHeiko Carstens 
555b0c632dbSHeiko Carstens void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
556b0c632dbSHeiko Carstens {
5576692cef3SChristian Borntraeger 	/* Nothing to do */
558b0c632dbSHeiko Carstens }
559b0c632dbSHeiko Carstens 
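/*
 * Switch from the host to the guest FPU and access registers, enable
 * the guest address space and mark the vcpu as running.
 */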
560b0c632dbSHeiko Carstens void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
561b0c632dbSHeiko Carstens {
5624725c860SMartin Schwidefsky 	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
5634725c860SMartin Schwidefsky 	save_fp_regs(vcpu->arch.host_fpregs.fprs);
564b0c632dbSHeiko Carstens 	save_access_regs(vcpu->arch.host_acrs);
5654725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
5664725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
56759674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
568480e5926SChristian Borntraeger 	gmap_enable(vcpu->arch.gmap);
5699e6dabefSCornelia Huck 	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
570b0c632dbSHeiko Carstens }
571b0c632dbSHeiko Carstens 
572b0c632dbSHeiko Carstens void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
573b0c632dbSHeiko Carstens {
5749e6dabefSCornelia Huck 	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
575480e5926SChristian Borntraeger 	gmap_disable(vcpu->arch.gmap);
5764725c860SMartin Schwidefsky 	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
5774725c860SMartin Schwidefsky 	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
57859674c1aSChristian Borntraeger 	save_access_regs(vcpu->run->s.regs.acrs);
5794725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
5804725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
581b0c632dbSHeiko Carstens 	restore_access_regs(vcpu->arch.host_acrs);
582b0c632dbSHeiko Carstens }
583b0c632dbSHeiko Carstens 
584b0c632dbSHeiko Carstens static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
585b0c632dbSHeiko Carstens {
586b0c632dbSHeiko Carstens 	/* this equals the initial cpu reset in the POP, but we don't switch to ESA */
587b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.mask = 0UL;
588b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.addr = 0UL;
5898d26cf7bSChristian Borntraeger 	kvm_s390_set_prefix(vcpu, 0);
590b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->cputm     = 0UL;
591b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->ckc       = 0UL;
592b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->todpr     = 0;
593b0c632dbSHeiko Carstens 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
594b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
595b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
596b0c632dbSHeiko Carstens 	vcpu->arch.guest_fpregs.fpc = 0;
597b0c632dbSHeiko Carstens 	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
598b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gbea = 1;
599672550fbSChristian Borntraeger 	vcpu->arch.sie_block->pp = 0;
6003c038e6bSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
6013c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
6026352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
6036852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
6042ed10cc1SJens Freimann 	kvm_s390_clear_local_irqs(vcpu);
605b0c632dbSHeiko Carstens }
606b0c632dbSHeiko Carstens 
60742897d86SMarcelo Tosatti int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
60842897d86SMarcelo Tosatti {
60942897d86SMarcelo Tosatti 	return 0;
61042897d86SMarcelo Tosatti }
61142897d86SMarcelo Tosatti 
612b31605c1SDominik Dingel void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
613b31605c1SDominik Dingel {
614b31605c1SDominik Dingel 	free_page(vcpu->arch.sie_block->cbrlo);
615b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = 0;
616b31605c1SDominik Dingel }
617b31605c1SDominik Dingel 
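/*
 * Allocate the collaborative-memory-management block origin (cbrlo)
 * page and enable CMMA interpretation in the SIE control block.
 */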
618b31605c1SDominik Dingel int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
619b31605c1SDominik Dingel {
620b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
621b31605c1SDominik Dingel 	if (!vcpu->arch.sie_block->cbrlo)
622b31605c1SDominik Dingel 		return -ENOMEM;
623b31605c1SDominik Dingel 
624b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 |= 0x80;
625b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 &= ~0x08;
626b31605c1SDominik Dingel 	return 0;
627b31605c1SDominik Dingel }
628b31605c1SDominik Dingel 
629b0c632dbSHeiko Carstens int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
630b0c632dbSHeiko Carstens {
631b31605c1SDominik Dingel 	int rc = 0;
632b31288faSKonstantin Weitz 
6339e6dabefSCornelia Huck 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
6349e6dabefSCornelia Huck 						    CPUSTAT_SM |
63569d0d3a3SChristian Borntraeger 						    CPUSTAT_STOPPED |
63669d0d3a3SChristian Borntraeger 						    CPUSTAT_GED);
637fc34531dSChristian Borntraeger 	vcpu->arch.sie_block->ecb   = 6;
6387feb6bb8SMichael Mueller 	if (test_vfacility(50) && test_vfacility(73))
6397feb6bb8SMichael Mueller 		vcpu->arch.sie_block->ecb |= 0x10;
6407feb6bb8SMichael Mueller 
64169d0d3a3SChristian Borntraeger 	vcpu->arch.sie_block->ecb2  = 8;
6424953919fSDavid Hildenbrand 	vcpu->arch.sie_block->eca   = 0xD1002000U;
643217a4406SHeiko Carstens 	if (sclp_has_siif())
644217a4406SHeiko Carstens 		vcpu->arch.sie_block->eca |= 1;
64578c4b59fSMichael Mueller 	vcpu->arch.sie_block->fac   = (int) (long) vfacilities;
6465a5e6536SMatthew Rosato 	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
6475a5e6536SMatthew Rosato 				      ICTL_TPROT;
6485a5e6536SMatthew Rosato 
649b31605c1SDominik Dingel 	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
650b31605c1SDominik Dingel 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
651b31605c1SDominik Dingel 		if (rc)
652b31605c1SDominik Dingel 			return rc;
653b31288faSKonstantin Weitz 	}
654ca872302SChristian Borntraeger 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
655ca872302SChristian Borntraeger 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
656453423dcSChristian Borntraeger 	get_cpu_id(&vcpu->arch.cpu_id);
65792e6ecf3SChristian Borntraeger 	vcpu->arch.cpu_id.version = 0xff;
658b31605c1SDominik Dingel 	return rc;
659b0c632dbSHeiko Carstens }
660b0c632dbSHeiko Carstens 
661b0c632dbSHeiko Carstens struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
662b0c632dbSHeiko Carstens 				      unsigned int id)
663b0c632dbSHeiko Carstens {
6644d47555aSCarsten Otte 	struct kvm_vcpu *vcpu;
6657feb6bb8SMichael Mueller 	struct sie_page *sie_page;
6664d47555aSCarsten Otte 	int rc = -EINVAL;
667b0c632dbSHeiko Carstens 
6684d47555aSCarsten Otte 	if (id >= KVM_MAX_VCPUS)
6694d47555aSCarsten Otte 		goto out;
6704d47555aSCarsten Otte 
6714d47555aSCarsten Otte 	rc = -ENOMEM;
6724d47555aSCarsten Otte 
673b110feafSMichael Mueller 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
674b0c632dbSHeiko Carstens 	if (!vcpu)
6754d47555aSCarsten Otte 		goto out;
676b0c632dbSHeiko Carstens 
6777feb6bb8SMichael Mueller 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
6787feb6bb8SMichael Mueller 	if (!sie_page)
679b0c632dbSHeiko Carstens 		goto out_free_cpu;
680b0c632dbSHeiko Carstens 
6817feb6bb8SMichael Mueller 	vcpu->arch.sie_block = &sie_page->sie_block;
6827feb6bb8SMichael Mueller 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
6837feb6bb8SMichael Mueller 
684b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icpua = id;
68558f9460bSCarsten Otte 	if (!kvm_is_ucontrol(kvm)) {
68658f9460bSCarsten Otte 		if (!kvm->arch.sca) {
68758f9460bSCarsten Otte 			WARN_ON_ONCE(1);
68858f9460bSCarsten Otte 			goto out_free_cpu;
68958f9460bSCarsten Otte 		}
690abf4a71eSCarsten Otte 		if (!kvm->arch.sca->cpu[id].sda)
69158f9460bSCarsten Otte 			kvm->arch.sca->cpu[id].sda =
69258f9460bSCarsten Otte 				(__u64) vcpu->arch.sie_block;
69358f9460bSCarsten Otte 		vcpu->arch.sie_block->scaoh =
69458f9460bSCarsten Otte 			(__u32)(((__u64)kvm->arch.sca) >> 32);
695b0c632dbSHeiko Carstens 		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
696fc34531dSChristian Borntraeger 		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
69758f9460bSCarsten Otte 	}
698b0c632dbSHeiko Carstens 
699ba5c1e9bSCarsten Otte 	spin_lock_init(&vcpu->arch.local_int.lock);
700ba5c1e9bSCarsten Otte 	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
701ba5c1e9bSCarsten Otte 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
702d0321a24SChristian Borntraeger 	vcpu->arch.local_int.wq = &vcpu->wq;
7035288fbf0SChristian Borntraeger 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
704ba5c1e9bSCarsten Otte 
705b0c632dbSHeiko Carstens 	rc = kvm_vcpu_init(vcpu, kvm, id);
706b0c632dbSHeiko Carstens 	if (rc)
7077b06bf2fSWei Yongjun 		goto out_free_sie_block;
708b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
709b0c632dbSHeiko Carstens 		 vcpu->arch.sie_block);
710ade38c31SCornelia Huck 	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
711b0c632dbSHeiko Carstens 
712b0c632dbSHeiko Carstens 	return vcpu;
7137b06bf2fSWei Yongjun out_free_sie_block:
7147b06bf2fSWei Yongjun 	free_page((unsigned long)(vcpu->arch.sie_block));
715b0c632dbSHeiko Carstens out_free_cpu:
716b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
7174d47555aSCarsten Otte out:
718b0c632dbSHeiko Carstens 	return ERR_PTR(rc);
719b0c632dbSHeiko Carstens }
720b0c632dbSHeiko Carstens 
721b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
722b0c632dbSHeiko Carstens {
723f87618e8SMichael Mueller 	return kvm_cpu_has_interrupt(vcpu);
724b0c632dbSHeiko Carstens }
725b0c632dbSHeiko Carstens 
72649b99e1eSChristian Borntraeger void s390_vcpu_block(struct kvm_vcpu *vcpu)
72749b99e1eSChristian Borntraeger {
72849b99e1eSChristian Borntraeger 	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
72949b99e1eSChristian Borntraeger }
73049b99e1eSChristian Borntraeger 
73149b99e1eSChristian Borntraeger void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
73249b99e1eSChristian Borntraeger {
73349b99e1eSChristian Borntraeger 	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
73449b99e1eSChristian Borntraeger }
73549b99e1eSChristian Borntraeger 
73649b99e1eSChristian Borntraeger /*
73749b99e1eSChristian Borntraeger  * Kick a guest cpu out of SIE and wait until SIE is not running.
73849b99e1eSChristian Borntraeger  * If the CPU is not running (e.g. waiting as idle), the function will
73949b99e1eSChristian Borntraeger  * return immediately. */
74049b99e1eSChristian Borntraeger void exit_sie(struct kvm_vcpu *vcpu)
74149b99e1eSChristian Borntraeger {
74249b99e1eSChristian Borntraeger 	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
74349b99e1eSChristian Borntraeger 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
74449b99e1eSChristian Borntraeger 		cpu_relax();
74549b99e1eSChristian Borntraeger }
74649b99e1eSChristian Borntraeger 
74749b99e1eSChristian Borntraeger /* Kick a guest cpu out of SIE and prevent SIE-reentry */
74849b99e1eSChristian Borntraeger void exit_sie_sync(struct kvm_vcpu *vcpu)
74949b99e1eSChristian Borntraeger {
75049b99e1eSChristian Borntraeger 	s390_vcpu_block(vcpu);
75149b99e1eSChristian Borntraeger 	exit_sie(vcpu);
75249b99e1eSChristian Borntraeger }
75349b99e1eSChristian Borntraeger 
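/*
 * gmap (ipte) notifier: if the invalidated address hits a vcpu's
 * prefix pages, request KVM_REQ_MMU_RELOAD and kick that vcpu out of SIE.
 */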
7542c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
7552c70fe44SChristian Borntraeger {
7562c70fe44SChristian Borntraeger 	int i;
7572c70fe44SChristian Borntraeger 	struct kvm *kvm = gmap->private;
7582c70fe44SChristian Borntraeger 	struct kvm_vcpu *vcpu;
7592c70fe44SChristian Borntraeger 
7602c70fe44SChristian Borntraeger 	kvm_for_each_vcpu(i, vcpu, kvm) {
7612c70fe44SChristian Borntraeger 		/* match against both prefix pages */
762fda902cbSMichael Mueller 		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
7632c70fe44SChristian Borntraeger 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
7642c70fe44SChristian Borntraeger 			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
7652c70fe44SChristian Borntraeger 			exit_sie_sync(vcpu);
7662c70fe44SChristian Borntraeger 		}
7672c70fe44SChristian Borntraeger 	}
7682c70fe44SChristian Borntraeger }
7692c70fe44SChristian Borntraeger 
770b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
771b6d33834SChristoffer Dall {
772b6d33834SChristoffer Dall 	/* kvm common code refers to this, but never calls it */
773b6d33834SChristoffer Dall 	BUG();
774b6d33834SChristoffer Dall 	return 0;
775b6d33834SChristoffer Dall }
776b6d33834SChristoffer Dall 
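/* KVM_GET_ONE_REG: copy a single SIE or pfault register to user space. */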
77714eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
77814eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
77914eebd91SCarsten Otte {
78014eebd91SCarsten Otte 	int r = -EINVAL;
78114eebd91SCarsten Otte 
78214eebd91SCarsten Otte 	switch (reg->id) {
78329b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
78429b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->todpr,
78529b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
78629b7c71bSCarsten Otte 		break;
78729b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
78829b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->epoch,
78929b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
79029b7c71bSCarsten Otte 		break;
79146a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
79246a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->cputm,
79346a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
79446a6dd1cSJason J. herne 		break;
79546a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
79646a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->ckc,
79746a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
79846a6dd1cSJason J. herne 		break;
799536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
800536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_token,
801536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
802536336c2SDominik Dingel 		break;
803536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
804536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_compare,
805536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
806536336c2SDominik Dingel 		break;
807536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
808536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_select,
809536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
810536336c2SDominik Dingel 		break;
811672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
812672550fbSChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->pp,
813672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
814672550fbSChristian Borntraeger 		break;
815afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
816afa45ff5SChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->gbea,
817afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
818afa45ff5SChristian Borntraeger 		break;
81914eebd91SCarsten Otte 	default:
82014eebd91SCarsten Otte 		break;
82114eebd91SCarsten Otte 	}
82214eebd91SCarsten Otte 
82314eebd91SCarsten Otte 	return r;
82414eebd91SCarsten Otte }
82514eebd91SCarsten Otte 
82614eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
82714eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
82814eebd91SCarsten Otte {
82914eebd91SCarsten Otte 	int r = -EINVAL;
83014eebd91SCarsten Otte 
83114eebd91SCarsten Otte 	switch (reg->id) {
83229b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
83329b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->todpr,
83429b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
83529b7c71bSCarsten Otte 		break;
83629b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
83729b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->epoch,
83829b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
83929b7c71bSCarsten Otte 		break;
84046a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
84146a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->cputm,
84246a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
84346a6dd1cSJason J. herne 		break;
84446a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
84546a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->ckc,
84646a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
84746a6dd1cSJason J. herne 		break;
848536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
849536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_token,
850536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
851536336c2SDominik Dingel 		break;
852536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
853536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_compare,
854536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
855536336c2SDominik Dingel 		break;
856536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
857536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_select,
858536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
859536336c2SDominik Dingel 		break;
860672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
861672550fbSChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->pp,
862672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
863672550fbSChristian Borntraeger 		break;
864afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
865afa45ff5SChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->gbea,
866afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
867afa45ff5SChristian Borntraeger 		break;
86814eebd91SCarsten Otte 	default:
86914eebd91SCarsten Otte 		break;
87014eebd91SCarsten Otte 	}
87114eebd91SCarsten Otte 
87214eebd91SCarsten Otte 	return r;
87314eebd91SCarsten Otte }
874b6d33834SChristoffer Dall 
875b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
876b0c632dbSHeiko Carstens {
877b0c632dbSHeiko Carstens 	kvm_s390_vcpu_initial_reset(vcpu);
878b0c632dbSHeiko Carstens 	return 0;
879b0c632dbSHeiko Carstens }
880b0c632dbSHeiko Carstens 
881b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
882b0c632dbSHeiko Carstens {
8835a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
884b0c632dbSHeiko Carstens 	return 0;
885b0c632dbSHeiko Carstens }
886b0c632dbSHeiko Carstens 
887b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
888b0c632dbSHeiko Carstens {
8895a32c1afSChristian Borntraeger 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
890b0c632dbSHeiko Carstens 	return 0;
891b0c632dbSHeiko Carstens }
892b0c632dbSHeiko Carstens 
893b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
894b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
895b0c632dbSHeiko Carstens {
89659674c1aSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
897b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
89859674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
899b0c632dbSHeiko Carstens 	return 0;
900b0c632dbSHeiko Carstens }
901b0c632dbSHeiko Carstens 
902b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
903b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
904b0c632dbSHeiko Carstens {
90559674c1aSChristian Borntraeger 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
906b0c632dbSHeiko Carstens 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
907b0c632dbSHeiko Carstens 	return 0;
908b0c632dbSHeiko Carstens }
909b0c632dbSHeiko Carstens 
910b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
911b0c632dbSHeiko Carstens {
9124725c860SMartin Schwidefsky 	if (test_fp_ctl(fpu->fpc))
9134725c860SMartin Schwidefsky 		return -EINVAL;
914b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
9154725c860SMartin Schwidefsky 	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
9164725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
9174725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
918b0c632dbSHeiko Carstens 	return 0;
919b0c632dbSHeiko Carstens }
920b0c632dbSHeiko Carstens 
921b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
922b0c632dbSHeiko Carstens {
923b0c632dbSHeiko Carstens 	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
924b0c632dbSHeiko Carstens 	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
925b0c632dbSHeiko Carstens 	return 0;
926b0c632dbSHeiko Carstens }
927b0c632dbSHeiko Carstens 
928b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
929b0c632dbSHeiko Carstens {
930b0c632dbSHeiko Carstens 	int rc = 0;
931b0c632dbSHeiko Carstens 
9327a42fdc2SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
933b0c632dbSHeiko Carstens 		rc = -EBUSY;
934d7b0b5ebSCarsten Otte 	else {
935d7b0b5ebSCarsten Otte 		vcpu->run->psw_mask = psw.mask;
936d7b0b5ebSCarsten Otte 		vcpu->run->psw_addr = psw.addr;
937d7b0b5ebSCarsten Otte 	}
938b0c632dbSHeiko Carstens 	return rc;
939b0c632dbSHeiko Carstens }
940b0c632dbSHeiko Carstens 
941b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
942b0c632dbSHeiko Carstens 				  struct kvm_translation *tr)
943b0c632dbSHeiko Carstens {
944b0c632dbSHeiko Carstens 	return -EINVAL; /* not implemented yet */
945b0c632dbSHeiko Carstens }
946b0c632dbSHeiko Carstens 
94727291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
94827291e21SDavid Hildenbrand 			      KVM_GUESTDBG_USE_HW_BP | \
94927291e21SDavid Hildenbrand 			      KVM_GUESTDBG_ENABLE)
95027291e21SDavid Hildenbrand 
951d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
952d0bfb940SJan Kiszka 					struct kvm_guest_debug *dbg)
953b0c632dbSHeiko Carstens {
95427291e21SDavid Hildenbrand 	int rc = 0;
95527291e21SDavid Hildenbrand 
95627291e21SDavid Hildenbrand 	vcpu->guest_debug = 0;
95727291e21SDavid Hildenbrand 	kvm_s390_clear_bp_data(vcpu);
95827291e21SDavid Hildenbrand 
9592de3bfc2SDavid Hildenbrand 	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
96027291e21SDavid Hildenbrand 		return -EINVAL;
96127291e21SDavid Hildenbrand 
96227291e21SDavid Hildenbrand 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
96327291e21SDavid Hildenbrand 		vcpu->guest_debug = dbg->control;
96427291e21SDavid Hildenbrand 		/* enforce guest PER */
96527291e21SDavid Hildenbrand 		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
96627291e21SDavid Hildenbrand 
96727291e21SDavid Hildenbrand 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
96827291e21SDavid Hildenbrand 			rc = kvm_s390_import_bp_data(vcpu, dbg);
96927291e21SDavid Hildenbrand 	} else {
97027291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
97127291e21SDavid Hildenbrand 		vcpu->arch.guestdbg.last_bp = 0;
97227291e21SDavid Hildenbrand 	}
97327291e21SDavid Hildenbrand 
97427291e21SDavid Hildenbrand 	if (rc) {
97527291e21SDavid Hildenbrand 		vcpu->guest_debug = 0;
97627291e21SDavid Hildenbrand 		kvm_s390_clear_bp_data(vcpu);
97727291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
97827291e21SDavid Hildenbrand 	}
97927291e21SDavid Hildenbrand 
98027291e21SDavid Hildenbrand 	return rc;
981b0c632dbSHeiko Carstens }
982b0c632dbSHeiko Carstens 
98362d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
98462d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
98562d9f0dbSMarcelo Tosatti {
9866352e4d2SDavid Hildenbrand 	/* CHECK_STOP and LOAD are not supported yet */
9876352e4d2SDavid Hildenbrand 	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
9886352e4d2SDavid Hildenbrand 				       KVM_MP_STATE_OPERATING;
98962d9f0dbSMarcelo Tosatti }
99062d9f0dbSMarcelo Tosatti 
99162d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
99262d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
99362d9f0dbSMarcelo Tosatti {
9946352e4d2SDavid Hildenbrand 	int rc = 0;
9956352e4d2SDavid Hildenbrand 
9966352e4d2SDavid Hildenbrand 	/* user space knows about this interface - let it control the state */
9976352e4d2SDavid Hildenbrand 	vcpu->kvm->arch.user_cpu_state_ctrl = 1;
9986352e4d2SDavid Hildenbrand 
9996352e4d2SDavid Hildenbrand 	switch (mp_state->mp_state) {
10006352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_STOPPED:
10016352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
10026352e4d2SDavid Hildenbrand 		break;
10036352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_OPERATING:
10046352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
10056352e4d2SDavid Hildenbrand 		break;
10066352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_LOAD:
10076352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_CHECK_STOP:
10086352e4d2SDavid Hildenbrand 		/* fall through - CHECK_STOP and LOAD are not supported yet */
10096352e4d2SDavid Hildenbrand 	default:
10106352e4d2SDavid Hildenbrand 		rc = -ENXIO;
10116352e4d2SDavid Hildenbrand 	}
10126352e4d2SDavid Hildenbrand 
10136352e4d2SDavid Hildenbrand 	return rc;
101462d9f0dbSMarcelo Tosatti }
101562d9f0dbSMarcelo Tosatti 
1016b31605c1SDominik Dingel bool kvm_s390_cmma_enabled(struct kvm *kvm)
1017b31605c1SDominik Dingel {
1018b31605c1SDominik Dingel 	if (!MACHINE_IS_LPAR)
1019b31605c1SDominik Dingel 		return false;
1020b31605c1SDominik Dingel 	/* only enable for z10 and later */
1021b31605c1SDominik Dingel 	if (!MACHINE_HAS_EDAT1)
1022b31605c1SDominik Dingel 		return false;
1023b31605c1SDominik Dingel 	if (!kvm->arch.use_cmma)
1024b31605c1SDominik Dingel 		return false;
1025b31605c1SDominik Dingel 	return true;
1026b31605c1SDominik Dingel }
1027b31605c1SDominik Dingel 
10288ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu)
10298ad35755SDavid Hildenbrand {
10308ad35755SDavid Hildenbrand 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
10318ad35755SDavid Hildenbrand }
10328ad35755SDavid Hildenbrand 
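/*
 * Process pending vcpu requests (prefix page re-mapping, TLB flush,
 * IBS enable/disable) before (re)entering SIE.
 */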
10332c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
10342c70fe44SChristian Borntraeger {
10358ad35755SDavid Hildenbrand retry:
10368ad35755SDavid Hildenbrand 	s390_vcpu_unblock(vcpu);
10372c70fe44SChristian Borntraeger 	/*
10382c70fe44SChristian Borntraeger 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
10392c70fe44SChristian Borntraeger 	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
10402c70fe44SChristian Borntraeger 	 * This ensures that the ipte instruction for this request has
10412c70fe44SChristian Borntraeger 	 * already finished. We might race against a second unmapper that
10422c70fe44SChristian Borntraeger 	 * wants to set the blocking bit. Let's just retry the request loop.
10432c70fe44SChristian Borntraeger 	 */
10448ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
10452c70fe44SChristian Borntraeger 		int rc;
10462c70fe44SChristian Borntraeger 		rc = gmap_ipte_notify(vcpu->arch.gmap,
1047fda902cbSMichael Mueller 				      kvm_s390_get_prefix(vcpu),
10482c70fe44SChristian Borntraeger 				      PAGE_SIZE * 2);
10492c70fe44SChristian Borntraeger 		if (rc)
10502c70fe44SChristian Borntraeger 			return rc;
10518ad35755SDavid Hildenbrand 		goto retry;
10522c70fe44SChristian Borntraeger 	}
10538ad35755SDavid Hildenbrand 
1054*d3d692c8SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
1055*d3d692c8SDavid Hildenbrand 		vcpu->arch.sie_block->ihcpu = 0xffff;
1056*d3d692c8SDavid Hildenbrand 		goto retry;
1057*d3d692c8SDavid Hildenbrand 	}
1058*d3d692c8SDavid Hildenbrand 
10598ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
10608ad35755SDavid Hildenbrand 		if (!ibs_enabled(vcpu)) {
10618ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
10628ad35755SDavid Hildenbrand 			atomic_set_mask(CPUSTAT_IBS,
10638ad35755SDavid Hildenbrand 					&vcpu->arch.sie_block->cpuflags);
10648ad35755SDavid Hildenbrand 		}
10658ad35755SDavid Hildenbrand 		goto retry;
10668ad35755SDavid Hildenbrand 	}
10678ad35755SDavid Hildenbrand 
10688ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
10698ad35755SDavid Hildenbrand 		if (ibs_enabled(vcpu)) {
10708ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
10718ad35755SDavid Hildenbrand 			atomic_clear_mask(CPUSTAT_IBS,
10728ad35755SDavid Hildenbrand 					  &vcpu->arch.sie_block->cpuflags);
10738ad35755SDavid Hildenbrand 		}
10748ad35755SDavid Hildenbrand 		goto retry;
10758ad35755SDavid Hildenbrand 	}
10768ad35755SDavid Hildenbrand 
10770759d068SDavid Hildenbrand 	/* nothing to do, just clear the request */
10780759d068SDavid Hildenbrand 	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
10790759d068SDavid Hildenbrand 
10802c70fe44SChristian Borntraeger 	return 0;
10812c70fe44SChristian Borntraeger }
10822c70fe44SChristian Borntraeger 
1083fa576c58SThomas Huth /**
1084fa576c58SThomas Huth  * kvm_arch_fault_in_page - fault-in guest page if necessary
1085fa576c58SThomas Huth  * @vcpu: The corresponding virtual cpu
1086fa576c58SThomas Huth  * @gpa: Guest physical address
1087fa576c58SThomas Huth  * @writable: Whether the page should be writable or not
1088fa576c58SThomas Huth  *
1089fa576c58SThomas Huth  * Make sure that a guest page has been faulted-in on the host.
1090fa576c58SThomas Huth  *
1091fa576c58SThomas Huth  * Return: Zero on success, negative error code otherwise.
1092fa576c58SThomas Huth  */
1093fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
109424eb3a82SDominik Dingel {
109524eb3a82SDominik Dingel 	struct mm_struct *mm = current->mm;
1096fa576c58SThomas Huth 	hva_t hva;
1097fa576c58SThomas Huth 	long rc;
1098fa576c58SThomas Huth 
1099fa576c58SThomas Huth 	hva = gmap_fault(gpa, vcpu->arch.gmap);
1100fa576c58SThomas Huth 	if (IS_ERR_VALUE(hva))
1101fa576c58SThomas Huth 		return (long)hva;
110224eb3a82SDominik Dingel 	down_read(&mm->mmap_sem);
1103fa576c58SThomas Huth 	rc = get_user_pages(current, mm, hva, 1, writable, 0, NULL, NULL);
110424eb3a82SDominik Dingel 	up_read(&mm->mmap_sem);
1105fa576c58SThomas Huth 
1106fa576c58SThomas Huth 	return rc < 0 ? rc : 0;
110724eb3a82SDominik Dingel }
110824eb3a82SDominik Dingel 
11093c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
11103c038e6bSDominik Dingel 				      unsigned long token)
11113c038e6bSDominik Dingel {
11123c038e6bSDominik Dingel 	struct kvm_s390_interrupt inti;
11133c038e6bSDominik Dingel 	inti.parm64 = token;
11143c038e6bSDominik Dingel 
11153c038e6bSDominik Dingel 	if (start_token) {
11163c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_INIT;
11173c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
11183c038e6bSDominik Dingel 	} else {
11193c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_DONE;
11203c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
11213c038e6bSDominik Dingel 	}
11223c038e6bSDominik Dingel }
11233c038e6bSDominik Dingel 
11243c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
11253c038e6bSDominik Dingel 				     struct kvm_async_pf *work)
11263c038e6bSDominik Dingel {
11273c038e6bSDominik Dingel 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
11283c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
11293c038e6bSDominik Dingel }
11303c038e6bSDominik Dingel 
11313c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
11323c038e6bSDominik Dingel 				 struct kvm_async_pf *work)
11333c038e6bSDominik Dingel {
11343c038e6bSDominik Dingel 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
11353c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
11363c038e6bSDominik Dingel }
11373c038e6bSDominik Dingel 
11383c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
11393c038e6bSDominik Dingel 			       struct kvm_async_pf *work)
11403c038e6bSDominik Dingel {
11413c038e6bSDominik Dingel 	/* s390 will always inject the page directly */
11423c038e6bSDominik Dingel }
11433c038e6bSDominik Dingel 
11443c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
11453c038e6bSDominik Dingel {
11463c038e6bSDominik Dingel 	/*
11473c038e6bSDominik Dingel 	 * s390 will always inject the page directly,
11483c038e6bSDominik Dingel 	 * but we still want check_async_completion to clean up
11493c038e6bSDominik Dingel 	 */
11503c038e6bSDominik Dingel 	return true;
11513c038e6bSDominik Dingel }
11523c038e6bSDominik Dingel 
11533c038e6bSDominik Dingel static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
11543c038e6bSDominik Dingel {
11553c038e6bSDominik Dingel 	hva_t hva;
11563c038e6bSDominik Dingel 	struct kvm_arch_async_pf arch;
11573c038e6bSDominik Dingel 	int rc;
11583c038e6bSDominik Dingel 
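	/* only queue an async pfault when the guest has set up pfault
	 * handshaking and is currently able to take the completion
	 * interrupt; otherwise fall back to a synchronous fault-in */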
11593c038e6bSDominik Dingel 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
11603c038e6bSDominik Dingel 		return 0;
11613c038e6bSDominik Dingel 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
11623c038e6bSDominik Dingel 	    vcpu->arch.pfault_compare)
11633c038e6bSDominik Dingel 		return 0;
11643c038e6bSDominik Dingel 	if (psw_extint_disabled(vcpu))
11653c038e6bSDominik Dingel 		return 0;
11663c038e6bSDominik Dingel 	if (kvm_cpu_has_interrupt(vcpu))
11673c038e6bSDominik Dingel 		return 0;
11683c038e6bSDominik Dingel 	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
11693c038e6bSDominik Dingel 		return 0;
11703c038e6bSDominik Dingel 	if (!vcpu->arch.gmap->pfault_enabled)
11713c038e6bSDominik Dingel 		return 0;
11723c038e6bSDominik Dingel 
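	/* resolve the host virtual address of the faulting guest page and
	 * keep the byte offset within that page */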
117381480cc1SHeiko Carstens 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
117481480cc1SHeiko Carstens 	hva += current->thread.gmap_addr & ~PAGE_MASK;
117581480cc1SHeiko Carstens 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
11763c038e6bSDominik Dingel 		return 0;
11773c038e6bSDominik Dingel 
11783c038e6bSDominik Dingel 	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
11793c038e6bSDominik Dingel 	return rc;
11803c038e6bSDominik Dingel }
11813c038e6bSDominik Dingel 
11823fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu)
1183b0c632dbSHeiko Carstens {
11843fb4c40fSThomas Huth 	int rc, cpuflags;
1185e168bf8dSCarsten Otte 
11863c038e6bSDominik Dingel 	/*
11873c038e6bSDominik Dingel 	 * On s390 notifications for arriving pages will be delivered directly
11873c038e6bSDominik Dingel 	 * to the guest, but the housekeeping for completed pfaults is
11893c038e6bSDominik Dingel 	 * handled outside the worker.
11903c038e6bSDominik Dingel 	 */
11913c038e6bSDominik Dingel 	kvm_check_async_pf_completion(vcpu);
11923c038e6bSDominik Dingel 
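	/* mirror guest registers 14 and 15 into the SIE block (gg14 and the
	 * following doubleword) before entering SIE */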
11935a32c1afSChristian Borntraeger 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
1194b0c632dbSHeiko Carstens 
1195b0c632dbSHeiko Carstens 	if (need_resched())
1196b0c632dbSHeiko Carstens 		schedule();
1197b0c632dbSHeiko Carstens 
1198d3a73acbSMartin Schwidefsky 	if (test_cpu_flag(CIF_MCCK_PENDING))
119971cde587SChristian Borntraeger 		s390_handle_mcck();
120071cde587SChristian Borntraeger 
1201d6b6d166SCarsten Otte 	if (!kvm_is_ucontrol(vcpu->kvm))
12020ff31867SCarsten Otte 		kvm_s390_deliver_pending_interrupts(vcpu);
12030ff31867SCarsten Otte 
12042c70fe44SChristian Borntraeger 	rc = kvm_s390_handle_requests(vcpu);
12052c70fe44SChristian Borntraeger 	if (rc)
12062c70fe44SChristian Borntraeger 		return rc;
12072c70fe44SChristian Borntraeger 
120827291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu)) {
120927291e21SDavid Hildenbrand 		kvm_s390_backup_guest_per_regs(vcpu);
121027291e21SDavid Hildenbrand 		kvm_s390_patch_guest_per_regs(vcpu);
121127291e21SDavid Hildenbrand 	}
121227291e21SDavid Hildenbrand 
1213b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icptcode = 0;
12143fb4c40fSThomas Huth 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
12153fb4c40fSThomas Huth 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
12163fb4c40fSThomas Huth 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
12172b29a9fdSDominik Dingel 
12183fb4c40fSThomas Huth 	return 0;
12193fb4c40fSThomas Huth }
12203fb4c40fSThomas Huth 
12213fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
12223fb4c40fSThomas Huth {
122324eb3a82SDominik Dingel 	int rc = -1;
12242b29a9fdSDominik Dingel 
12252b29a9fdSDominik Dingel 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
12262b29a9fdSDominik Dingel 		   vcpu->arch.sie_block->icptcode);
12272b29a9fdSDominik Dingel 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
12282b29a9fdSDominik Dingel 
122927291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu))
123027291e21SDavid Hildenbrand 		kvm_s390_restore_guest_per_regs(vcpu);
123127291e21SDavid Hildenbrand 
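	/* a non-negative exit_reason means SIE was left via a regular
	 * interception; a negative value indicates the host took a fault
	 * while the guest was running (handled below) */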
12323fb4c40fSThomas Huth 	if (exit_reason >= 0) {
12337c470539SMartin Schwidefsky 		rc = 0;
1234210b1607SThomas Huth 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
1235210b1607SThomas Huth 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
1236210b1607SThomas Huth 		vcpu->run->s390_ucontrol.trans_exc_code =
1237210b1607SThomas Huth 						current->thread.gmap_addr;
1238210b1607SThomas Huth 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
1239210b1607SThomas Huth 		rc = -EREMOTE;
124024eb3a82SDominik Dingel 
124124eb3a82SDominik Dingel 	} else if (current->thread.gmap_pfault) {
12423c038e6bSDominik Dingel 		trace_kvm_s390_major_guest_pfault(vcpu);
124324eb3a82SDominik Dingel 		current->thread.gmap_pfault = 0;
1244fa576c58SThomas Huth 		if (kvm_arch_setup_async_pf(vcpu)) {
124524eb3a82SDominik Dingel 			rc = 0;
1246fa576c58SThomas Huth 		} else {
1247fa576c58SThomas Huth 			gpa_t gpa = current->thread.gmap_addr;
1248fa576c58SThomas Huth 			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
1249fa576c58SThomas Huth 		}
125024eb3a82SDominik Dingel 	}
125124eb3a82SDominik Dingel 
125224eb3a82SDominik Dingel 	if (rc == -1) {
1253699bde3bSChristian Borntraeger 		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
1254699bde3bSChristian Borntraeger 		trace_kvm_s390_sie_fault(vcpu);
1255699bde3bSChristian Borntraeger 		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
12561f0d0f09SCarsten Otte 	}
1257b0c632dbSHeiko Carstens 
12585a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
12593fb4c40fSThomas Huth 
1260a76ccff6SThomas Huth 	if (rc == 0) {
1261a76ccff6SThomas Huth 		if (kvm_is_ucontrol(vcpu->kvm))
12622955c83fSChristian Borntraeger 			/* Don't exit for host interrupts. */
12632955c83fSChristian Borntraeger 			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
1264a76ccff6SThomas Huth 		else
1265a76ccff6SThomas Huth 			rc = kvm_handle_sie_intercept(vcpu);
1266a76ccff6SThomas Huth 	}
1267a76ccff6SThomas Huth 
12683fb4c40fSThomas Huth 	return rc;
12693fb4c40fSThomas Huth }
12703fb4c40fSThomas Huth 
12713fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu)
12723fb4c40fSThomas Huth {
12733fb4c40fSThomas Huth 	int rc, exit_reason;
12743fb4c40fSThomas Huth 
1275800c1065SThomas Huth 	/*
1276800c1065SThomas Huth 	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
1277800c1065SThomas Huth 	 * ning the guest), so that memslots (and other stuff) are protected
1278800c1065SThomas Huth 	 */
1279800c1065SThomas Huth 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1280800c1065SThomas Huth 
1281a76ccff6SThomas Huth 	do {
12823fb4c40fSThomas Huth 		rc = vcpu_pre_run(vcpu);
12833fb4c40fSThomas Huth 		if (rc)
1284a76ccff6SThomas Huth 			break;
12853fb4c40fSThomas Huth 
1286800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
12873fb4c40fSThomas Huth 		/*
1288a76ccff6SThomas Huth 		 * As PF_VCPU will be used in the fault handler, there must be
1289a76ccff6SThomas Huth 		 * no uaccess between guest_enter and guest_exit.
12903fb4c40fSThomas Huth 		 */
12913fb4c40fSThomas Huth 		preempt_disable();
12923fb4c40fSThomas Huth 		kvm_guest_enter();
12933fb4c40fSThomas Huth 		preempt_enable();
1294a76ccff6SThomas Huth 		exit_reason = sie64a(vcpu->arch.sie_block,
1295a76ccff6SThomas Huth 				     vcpu->run->s.regs.gprs);
12963fb4c40fSThomas Huth 		kvm_guest_exit();
1297800c1065SThomas Huth 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
12983fb4c40fSThomas Huth 
12993fb4c40fSThomas Huth 		rc = vcpu_post_run(vcpu, exit_reason);
130027291e21SDavid Hildenbrand 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
13013fb4c40fSThomas Huth 
1302800c1065SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1303e168bf8dSCarsten Otte 	return rc;
1304b0c632dbSHeiko Carstens }
1305b0c632dbSHeiko Carstens 
1306b028ee3eSDavid Hildenbrand static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1307b028ee3eSDavid Hildenbrand {
1308b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
1309b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
1310b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
1311b028ee3eSDavid Hildenbrand 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
1312b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
1313b028ee3eSDavid Hildenbrand 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
1314*d3d692c8SDavid Hildenbrand 		/* some control register changes require a tlb flush */
1315*d3d692c8SDavid Hildenbrand 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1316b028ee3eSDavid Hildenbrand 	}
1317b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
1318b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
1319b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
1320b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
1321b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
1322b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
1323b028ee3eSDavid Hildenbrand 	}
1324b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
1325b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
1326b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
1327b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
1328b028ee3eSDavid Hildenbrand 	}
1329b028ee3eSDavid Hildenbrand 	kvm_run->kvm_dirty_regs = 0;
1330b028ee3eSDavid Hildenbrand }
1331b028ee3eSDavid Hildenbrand 
1332b028ee3eSDavid Hildenbrand static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1333b028ee3eSDavid Hildenbrand {
1334b028ee3eSDavid Hildenbrand 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
1335b028ee3eSDavid Hildenbrand 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
1336b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
1337b028ee3eSDavid Hildenbrand 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
1338b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
1339b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
1340b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
1341b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
1342b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
1343b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
1344b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
1345b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
1346b028ee3eSDavid Hildenbrand }
1347b028ee3eSDavid Hildenbrand 
1348b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1349b0c632dbSHeiko Carstens {
13508f2abe6aSChristian Borntraeger 	int rc;
1351b0c632dbSHeiko Carstens 	sigset_t sigsaved;
1352b0c632dbSHeiko Carstens 
135327291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu)) {
135427291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
135527291e21SDavid Hildenbrand 		return 0;
135627291e21SDavid Hildenbrand 	}
135727291e21SDavid Hildenbrand 
1358b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
1359b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1360b0c632dbSHeiko Carstens 
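	/* without user-controlled cpu states, KVM_RUN implicitly starts a
	 * stopped vcpu; otherwise running a stopped vcpu is rejected */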
13616352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
13626852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
13636352e4d2SDavid Hildenbrand 	} else if (is_vcpu_stopped(vcpu)) {
13646352e4d2SDavid Hildenbrand 		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
13656352e4d2SDavid Hildenbrand 				   vcpu->vcpu_id);
13666352e4d2SDavid Hildenbrand 		return -EINVAL;
13676352e4d2SDavid Hildenbrand 	}
1368b0c632dbSHeiko Carstens 
1369b028ee3eSDavid Hildenbrand 	sync_regs(vcpu, kvm_run);
1370d7b0b5ebSCarsten Otte 
1371dab4079dSHeiko Carstens 	might_fault();
1372e168bf8dSCarsten Otte 	rc = __vcpu_run(vcpu);
13739ace903dSChristian Ehrhardt 
1374b1d16c49SChristian Ehrhardt 	if (signal_pending(current) && !rc) {
1375b1d16c49SChristian Ehrhardt 		kvm_run->exit_reason = KVM_EXIT_INTR;
13768f2abe6aSChristian Borntraeger 		rc = -EINTR;
1377b1d16c49SChristian Ehrhardt 	}
13788f2abe6aSChristian Borntraeger 
137927291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu) && !rc)  {
138027291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
138127291e21SDavid Hildenbrand 		rc = 0;
138227291e21SDavid Hildenbrand 	}
138327291e21SDavid Hildenbrand 
1384b8e660b8SHeiko Carstens 	if (rc == -EOPNOTSUPP) {
13858f2abe6aSChristian Borntraeger 		/* intercept cannot be handled in-kernel, prepare kvm-run */
13868f2abe6aSChristian Borntraeger 		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
13878f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
13888f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
13898f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
13908f2abe6aSChristian Borntraeger 		rc = 0;
13918f2abe6aSChristian Borntraeger 	}
13928f2abe6aSChristian Borntraeger 
13938f2abe6aSChristian Borntraeger 	if (rc == -EREMOTE) {
13948f2abe6aSChristian Borntraeger 		/* intercept was handled, but userspace support is needed;
13958f2abe6aSChristian Borntraeger 		 * kvm_run has been prepared by the handler */
13968f2abe6aSChristian Borntraeger 		rc = 0;
13978f2abe6aSChristian Borntraeger 	}
13988f2abe6aSChristian Borntraeger 
1399b028ee3eSDavid Hildenbrand 	store_regs(vcpu, kvm_run);
1400d7b0b5ebSCarsten Otte 
1401b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
1402b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1403b0c632dbSHeiko Carstens 
1404b0c632dbSHeiko Carstens 	vcpu->stat.exit_userspace++;
14057e8e6ab4SHeiko Carstens 	return rc;
1406b0c632dbSHeiko Carstens }
1407b0c632dbSHeiko Carstens 
1408b0c632dbSHeiko Carstens /*
1409b0c632dbSHeiko Carstens  * store status at address
1410b0c632dbSHeiko Carstens  * we have two special cases:
1411b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
1412b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
1413b0c632dbSHeiko Carstens  */
1414d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
1415b0c632dbSHeiko Carstens {
1416092670cdSCarsten Otte 	unsigned char archmode = 1;
1417fda902cbSMichael Mueller 	unsigned int px;
1418178bd789SThomas Huth 	u64 clkcomp;
1419d0bce605SHeiko Carstens 	int rc;
1420b0c632dbSHeiko Carstens 
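	/* byte 163 of the save area holds the architectural-mode id;
	 * archmode == 1 marks a z/Architecture status (per the store-status
	 * layout) */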
1421d0bce605SHeiko Carstens 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
1422d0bce605SHeiko Carstens 		if (write_guest_abs(vcpu, 163, &archmode, 1))
1423b0c632dbSHeiko Carstens 			return -EFAULT;
1424d0bce605SHeiko Carstens 		gpa = SAVE_AREA_BASE;
1425d0bce605SHeiko Carstens 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
1426d0bce605SHeiko Carstens 		if (write_guest_real(vcpu, 163, &archmode, 1))
1427b0c632dbSHeiko Carstens 			return -EFAULT;
1428d0bce605SHeiko Carstens 		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
1429d0bce605SHeiko Carstens 	}
1430d0bce605SHeiko Carstens 	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
1431d0bce605SHeiko Carstens 			     vcpu->arch.guest_fpregs.fprs, 128);
1432d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
1433d0bce605SHeiko Carstens 			      vcpu->run->s.regs.gprs, 128);
1434d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
1435d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gpsw, 16);
1436fda902cbSMichael Mueller 	px = kvm_s390_get_prefix(vcpu);
1437d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
1438fda902cbSMichael Mueller 			      &px, 4);
1439d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu,
1440d0bce605SHeiko Carstens 			      gpa + offsetof(struct save_area, fp_ctrl_reg),
1441d0bce605SHeiko Carstens 			      &vcpu->arch.guest_fpregs.fpc, 4);
1442d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
1443d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->todpr, 4);
1444d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
1445d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->cputm, 8);
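	/* only the upper 56 bits (bits 0-55) of the clock comparator are
	 * stored, hence the 8-bit right shift */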
1446178bd789SThomas Huth 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
1447d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
1448d0bce605SHeiko Carstens 			      &clkcomp, 8);
1449d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
1450d0bce605SHeiko Carstens 			      &vcpu->run->s.regs.acrs, 64);
1451d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
1452d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gcr, 128);
1453d0bce605SHeiko Carstens 	return rc ? -EFAULT : 0;
1454b0c632dbSHeiko Carstens }
1455b0c632dbSHeiko Carstens 
1456e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
1457e879892cSThomas Huth {
1458e879892cSThomas Huth 	/*
1459e879892cSThomas Huth 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
1460e879892cSThomas Huth 	 * copying in vcpu load/put. Let's update our copies before we save
1461e879892cSThomas Huth 	 * them into the save area.
1462e879892cSThomas Huth 	 */
1463e879892cSThomas Huth 	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
1464e879892cSThomas Huth 	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
1465e879892cSThomas Huth 	save_access_regs(vcpu->run->s.regs.acrs);
1466e879892cSThomas Huth 
1467e879892cSThomas Huth 	return kvm_s390_store_status_unloaded(vcpu, addr);
1468e879892cSThomas Huth }
1469e879892cSThomas Huth 
14708ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
14718ad35755SDavid Hildenbrand {
14728ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
14738ad35755SDavid Hildenbrand 	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
14748ad35755SDavid Hildenbrand 	exit_sie_sync(vcpu);
14758ad35755SDavid Hildenbrand }
14768ad35755SDavid Hildenbrand 
14778ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
14788ad35755SDavid Hildenbrand {
14798ad35755SDavid Hildenbrand 	unsigned int i;
14808ad35755SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
14818ad35755SDavid Hildenbrand 
14828ad35755SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
14838ad35755SDavid Hildenbrand 		__disable_ibs_on_vcpu(vcpu);
14848ad35755SDavid Hildenbrand 	}
14858ad35755SDavid Hildenbrand }
14868ad35755SDavid Hildenbrand 
14878ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
14888ad35755SDavid Hildenbrand {
14898ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
14908ad35755SDavid Hildenbrand 	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
14918ad35755SDavid Hildenbrand 	exit_sie_sync(vcpu);
14928ad35755SDavid Hildenbrand }
14938ad35755SDavid Hildenbrand 
14946852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
14956852d7b6SDavid Hildenbrand {
14968ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
14978ad35755SDavid Hildenbrand 
14988ad35755SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
14998ad35755SDavid Hildenbrand 		return;
15008ad35755SDavid Hildenbrand 
15016852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
15028ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
1503433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
15048ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
15058ad35755SDavid Hildenbrand 
15068ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
15078ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
15088ad35755SDavid Hildenbrand 			started_vcpus++;
15098ad35755SDavid Hildenbrand 	}
15108ad35755SDavid Hildenbrand 
15118ad35755SDavid Hildenbrand 	if (started_vcpus == 0) {
15128ad35755SDavid Hildenbrand 		/* we're the only active VCPU -> speed it up */
15138ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(vcpu);
15148ad35755SDavid Hildenbrand 	} else if (started_vcpus == 1) {
15158ad35755SDavid Hildenbrand 		/*
15168ad35755SDavid Hildenbrand 		 * As we are starting a second VCPU, we have to disable
15178ad35755SDavid Hildenbrand 		 * the IBS facility on all VCPUs to remove potentially
15188ad35755SDavid Hildenbrand 		 * outstanding ENABLE requests.
15198ad35755SDavid Hildenbrand 		 */
15208ad35755SDavid Hildenbrand 		__disable_ibs_on_all_vcpus(vcpu->kvm);
15218ad35755SDavid Hildenbrand 	}
15228ad35755SDavid Hildenbrand 
15236852d7b6SDavid Hildenbrand 	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
15248ad35755SDavid Hildenbrand 	/*
15258ad35755SDavid Hildenbrand 	 * Another VCPU might have used IBS while we were offline.
15268ad35755SDavid Hildenbrand 	 * Let's play safe and flush the VCPU at startup.
15278ad35755SDavid Hildenbrand 	 */
1528*d3d692c8SDavid Hildenbrand 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1529433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
15308ad35755SDavid Hildenbrand 	return;
15316852d7b6SDavid Hildenbrand }
15326852d7b6SDavid Hildenbrand 
15336852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
15346852d7b6SDavid Hildenbrand {
15358ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
15368ad35755SDavid Hildenbrand 	struct kvm_vcpu *started_vcpu = NULL;
15378ad35755SDavid Hildenbrand 
15388ad35755SDavid Hildenbrand 	if (is_vcpu_stopped(vcpu))
15398ad35755SDavid Hildenbrand 		return;
15408ad35755SDavid Hildenbrand 
15416852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
15428ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
1543433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
15448ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
15458ad35755SDavid Hildenbrand 
154632f5ff63SDavid Hildenbrand 	/* Need to lock access to action_bits to avoid a SIGP race condition */
15474ae3c081SDavid Hildenbrand 	spin_lock(&vcpu->arch.local_int.lock);
15486852d7b6SDavid Hildenbrand 	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
154932f5ff63SDavid Hildenbrand 
155032f5ff63SDavid Hildenbrand 	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
155132f5ff63SDavid Hildenbrand 	vcpu->arch.local_int.action_bits &=
155232f5ff63SDavid Hildenbrand 				 ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
15534ae3c081SDavid Hildenbrand 	spin_unlock(&vcpu->arch.local_int.lock);
155432f5ff63SDavid Hildenbrand 
15558ad35755SDavid Hildenbrand 	__disable_ibs_on_vcpu(vcpu);
15568ad35755SDavid Hildenbrand 
15578ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
15588ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
15598ad35755SDavid Hildenbrand 			started_vcpus++;
15608ad35755SDavid Hildenbrand 			started_vcpu = vcpu->kvm->vcpus[i];
15618ad35755SDavid Hildenbrand 		}
15628ad35755SDavid Hildenbrand 	}
15638ad35755SDavid Hildenbrand 
15648ad35755SDavid Hildenbrand 	if (started_vcpus == 1) {
15658ad35755SDavid Hildenbrand 		/*
15668ad35755SDavid Hildenbrand 		 * As we only have one VCPU left, we want to enable the
15678ad35755SDavid Hildenbrand 		 * IBS facility for that VCPU to speed it up.
15688ad35755SDavid Hildenbrand 		 */
15698ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(started_vcpu);
15708ad35755SDavid Hildenbrand 	}
15718ad35755SDavid Hildenbrand 
1572433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
15738ad35755SDavid Hildenbrand 	return;
15746852d7b6SDavid Hildenbrand }
15756852d7b6SDavid Hildenbrand 
1576d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1577d6712df9SCornelia Huck 				     struct kvm_enable_cap *cap)
1578d6712df9SCornelia Huck {
1579d6712df9SCornelia Huck 	int r;
1580d6712df9SCornelia Huck 
1581d6712df9SCornelia Huck 	if (cap->flags)
1582d6712df9SCornelia Huck 		return -EINVAL;
1583d6712df9SCornelia Huck 
1584d6712df9SCornelia Huck 	switch (cap->cap) {
1585fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
1586fa6b7fe9SCornelia Huck 		if (!vcpu->kvm->arch.css_support) {
1587fa6b7fe9SCornelia Huck 			vcpu->kvm->arch.css_support = 1;
1588fa6b7fe9SCornelia Huck 			trace_kvm_s390_enable_css(vcpu->kvm);
1589fa6b7fe9SCornelia Huck 		}
1590fa6b7fe9SCornelia Huck 		r = 0;
1591fa6b7fe9SCornelia Huck 		break;
1592d6712df9SCornelia Huck 	default:
1593d6712df9SCornelia Huck 		r = -EINVAL;
1594d6712df9SCornelia Huck 		break;
1595d6712df9SCornelia Huck 	}
1596d6712df9SCornelia Huck 	return r;
1597d6712df9SCornelia Huck }
1598d6712df9SCornelia Huck 
1599b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp,
1600b0c632dbSHeiko Carstens 			 unsigned int ioctl, unsigned long arg)
1601b0c632dbSHeiko Carstens {
1602b0c632dbSHeiko Carstens 	struct kvm_vcpu *vcpu = filp->private_data;
1603b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
1604800c1065SThomas Huth 	int idx;
1605bc923cc9SAvi Kivity 	long r;
1606b0c632dbSHeiko Carstens 
160793736624SAvi Kivity 	switch (ioctl) {
160893736624SAvi Kivity 	case KVM_S390_INTERRUPT: {
1609ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
1610ba5c1e9bSCarsten Otte 
161193736624SAvi Kivity 		r = -EFAULT;
1612ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
161393736624SAvi Kivity 			break;
161493736624SAvi Kivity 		r = kvm_s390_inject_vcpu(vcpu, &s390int);
161593736624SAvi Kivity 		break;
1616ba5c1e9bSCarsten Otte 	}
1617b0c632dbSHeiko Carstens 	case KVM_S390_STORE_STATUS:
1618800c1065SThomas Huth 		idx = srcu_read_lock(&vcpu->kvm->srcu);
1619bc923cc9SAvi Kivity 		r = kvm_s390_vcpu_store_status(vcpu, arg);
1620800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
1621bc923cc9SAvi Kivity 		break;
1622b0c632dbSHeiko Carstens 	case KVM_S390_SET_INITIAL_PSW: {
1623b0c632dbSHeiko Carstens 		psw_t psw;
1624b0c632dbSHeiko Carstens 
1625bc923cc9SAvi Kivity 		r = -EFAULT;
1626b0c632dbSHeiko Carstens 		if (copy_from_user(&psw, argp, sizeof(psw)))
1627bc923cc9SAvi Kivity 			break;
1628bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
1629bc923cc9SAvi Kivity 		break;
1630b0c632dbSHeiko Carstens 	}
1631b0c632dbSHeiko Carstens 	case KVM_S390_INITIAL_RESET:
1632bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
1633bc923cc9SAvi Kivity 		break;
163414eebd91SCarsten Otte 	case KVM_SET_ONE_REG:
163514eebd91SCarsten Otte 	case KVM_GET_ONE_REG: {
163614eebd91SCarsten Otte 		struct kvm_one_reg reg;
163714eebd91SCarsten Otte 		r = -EFAULT;
163814eebd91SCarsten Otte 		if (copy_from_user(&reg, argp, sizeof(reg)))
163914eebd91SCarsten Otte 			break;
164014eebd91SCarsten Otte 		if (ioctl == KVM_SET_ONE_REG)
164114eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
164214eebd91SCarsten Otte 		else
164314eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
164414eebd91SCarsten Otte 		break;
164514eebd91SCarsten Otte 	}
164627e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
164727e0393fSCarsten Otte 	case KVM_S390_UCAS_MAP: {
164827e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
164927e0393fSCarsten Otte 
165027e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
165127e0393fSCarsten Otte 			r = -EFAULT;
165227e0393fSCarsten Otte 			break;
165327e0393fSCarsten Otte 		}
165427e0393fSCarsten Otte 
165527e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
165627e0393fSCarsten Otte 			r = -EINVAL;
165727e0393fSCarsten Otte 			break;
165827e0393fSCarsten Otte 		}
165927e0393fSCarsten Otte 
166027e0393fSCarsten Otte 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
166127e0393fSCarsten Otte 				     ucasmap.vcpu_addr, ucasmap.length);
166227e0393fSCarsten Otte 		break;
166327e0393fSCarsten Otte 	}
166427e0393fSCarsten Otte 	case KVM_S390_UCAS_UNMAP: {
166527e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
166627e0393fSCarsten Otte 
166727e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
166827e0393fSCarsten Otte 			r = -EFAULT;
166927e0393fSCarsten Otte 			break;
167027e0393fSCarsten Otte 		}
167127e0393fSCarsten Otte 
167227e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
167327e0393fSCarsten Otte 			r = -EINVAL;
167427e0393fSCarsten Otte 			break;
167527e0393fSCarsten Otte 		}
167627e0393fSCarsten Otte 
167727e0393fSCarsten Otte 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
167827e0393fSCarsten Otte 			ucasmap.length);
167927e0393fSCarsten Otte 		break;
168027e0393fSCarsten Otte 	}
168127e0393fSCarsten Otte #endif
1682ccc7910fSCarsten Otte 	case KVM_S390_VCPU_FAULT: {
1683ccc7910fSCarsten Otte 		r = gmap_fault(arg, vcpu->arch.gmap);
1684ccc7910fSCarsten Otte 		if (!IS_ERR_VALUE(r))
1685ccc7910fSCarsten Otte 			r = 0;
1686ccc7910fSCarsten Otte 		break;
1687ccc7910fSCarsten Otte 	}
1688d6712df9SCornelia Huck 	case KVM_ENABLE_CAP:
1689d6712df9SCornelia Huck 	{
1690d6712df9SCornelia Huck 		struct kvm_enable_cap cap;
1691d6712df9SCornelia Huck 		r = -EFAULT;
1692d6712df9SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
1693d6712df9SCornelia Huck 			break;
1694d6712df9SCornelia Huck 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1695d6712df9SCornelia Huck 		break;
1696d6712df9SCornelia Huck 	}
1697b0c632dbSHeiko Carstens 	default:
16983e6afcf1SCarsten Otte 		r = -ENOTTY;
1699b0c632dbSHeiko Carstens 	}
1700bc923cc9SAvi Kivity 	return r;
1701b0c632dbSHeiko Carstens }
1702b0c632dbSHeiko Carstens 
17035b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
17045b1c1493SCarsten Otte {
17055b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
17065b1c1493SCarsten Otte 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
17075b1c1493SCarsten Otte 		 && (kvm_is_ucontrol(vcpu->kvm))) {
17085b1c1493SCarsten Otte 		vmf->page = virt_to_page(vcpu->arch.sie_block);
17095b1c1493SCarsten Otte 		get_page(vmf->page);
17105b1c1493SCarsten Otte 		return 0;
17115b1c1493SCarsten Otte 	}
17125b1c1493SCarsten Otte #endif
17135b1c1493SCarsten Otte 	return VM_FAULT_SIGBUS;
17145b1c1493SCarsten Otte }
17155b1c1493SCarsten Otte 
17165587027cSAneesh Kumar K.V void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
1717db3fe4ebSTakuya Yoshikawa 			   struct kvm_memory_slot *dont)
1718db3fe4ebSTakuya Yoshikawa {
1719db3fe4ebSTakuya Yoshikawa }
1720db3fe4ebSTakuya Yoshikawa 
17215587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
17225587027cSAneesh Kumar K.V 			    unsigned long npages)
1723db3fe4ebSTakuya Yoshikawa {
1724db3fe4ebSTakuya Yoshikawa 	return 0;
1725db3fe4ebSTakuya Yoshikawa }
1726db3fe4ebSTakuya Yoshikawa 
1727e59dbe09STakuya Yoshikawa void kvm_arch_memslots_updated(struct kvm *kvm)
1728e59dbe09STakuya Yoshikawa {
1729e59dbe09STakuya Yoshikawa }
1730e59dbe09STakuya Yoshikawa 
1731b0c632dbSHeiko Carstens /* Section: memory related */
1732f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
1733f7784b8eSMarcelo Tosatti 				   struct kvm_memory_slot *memslot,
17347b6195a9STakuya Yoshikawa 				   struct kvm_userspace_memory_region *mem,
17357b6195a9STakuya Yoshikawa 				   enum kvm_mr_change change)
1736b0c632dbSHeiko Carstens {
1737dd2887e7SNick Wang 	/* A few sanity checks. Memory slots have to start and end on a
1738dd2887e7SNick Wang 	   segment boundary (1 MB). The memory in userland may be fragmented
1739dd2887e7SNick Wang 	   into various different vmas. It is okay to mmap() and munmap()
1740dd2887e7SNick Wang 	   parts of this slot at any time after this call. */
1741b0c632dbSHeiko Carstens 
1742598841caSCarsten Otte 	if (mem->userspace_addr & 0xffffful)
1743b0c632dbSHeiko Carstens 		return -EINVAL;
1744b0c632dbSHeiko Carstens 
1745598841caSCarsten Otte 	if (mem->memory_size & 0xffffful)
1746b0c632dbSHeiko Carstens 		return -EINVAL;
1747b0c632dbSHeiko Carstens 
1748f7784b8eSMarcelo Tosatti 	return 0;
1749f7784b8eSMarcelo Tosatti }
1750f7784b8eSMarcelo Tosatti 
1751f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
1752f7784b8eSMarcelo Tosatti 				struct kvm_userspace_memory_region *mem,
17538482644aSTakuya Yoshikawa 				const struct kvm_memory_slot *old,
17548482644aSTakuya Yoshikawa 				enum kvm_mr_change change)
1755f7784b8eSMarcelo Tosatti {
1756f7850c92SCarsten Otte 	int rc;
1757f7784b8eSMarcelo Tosatti 
17582cef4debSChristian Borntraeger 	/* If the basics of the memslot do not change, we do not want
17592cef4debSChristian Borntraeger 	 * to update the gmap. Every update causes several unnecessary
17602cef4debSChristian Borntraeger 	 * segment translation exceptions. This is usually handled just
17612cef4debSChristian Borntraeger 	 * fine by the normal fault handler + gmap, but it will also
17622cef4debSChristian Borntraeger 	 * cause faults on the prefix page of running guest CPUs.
17632cef4debSChristian Borntraeger 	 */
17642cef4debSChristian Borntraeger 	if (old->userspace_addr == mem->userspace_addr &&
17652cef4debSChristian Borntraeger 	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
17662cef4debSChristian Borntraeger 	    old->npages * PAGE_SIZE == mem->memory_size)
17672cef4debSChristian Borntraeger 		return;
1768598841caSCarsten Otte 
1769598841caSCarsten Otte 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
1770598841caSCarsten Otte 		mem->guest_phys_addr, mem->memory_size);
1771598841caSCarsten Otte 	if (rc)
1772f7850c92SCarsten Otte 		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
1773598841caSCarsten Otte 	return;
1774b0c632dbSHeiko Carstens }
1775b0c632dbSHeiko Carstens 
17762df72e9bSMarcelo Tosatti void kvm_arch_flush_shadow_all(struct kvm *kvm)
17772df72e9bSMarcelo Tosatti {
17782df72e9bSMarcelo Tosatti }
17792df72e9bSMarcelo Tosatti 
17802df72e9bSMarcelo Tosatti void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
17812df72e9bSMarcelo Tosatti 				   struct kvm_memory_slot *slot)
178234d4cb8fSMarcelo Tosatti {
178334d4cb8fSMarcelo Tosatti }
178434d4cb8fSMarcelo Tosatti 
1785b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
1786b0c632dbSHeiko Carstens {
1787ef50f7acSChristian Borntraeger 	int ret;
17880ee75beaSAvi Kivity 	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1789ef50f7acSChristian Borntraeger 	if (ret)
1790ef50f7acSChristian Borntraeger 		return ret;
1791ef50f7acSChristian Borntraeger 
1792ef50f7acSChristian Borntraeger 	/*
1793ef50f7acSChristian Borntraeger 	 * guests can ask for up to 255+1 double words, so we need a full page
179425985edcSLucas De Marchi 	 * to hold the maximum number of facilities. On the other hand, we
1795ef50f7acSChristian Borntraeger 	 * only set facilities that are known to work in KVM.
1796ef50f7acSChristian Borntraeger 	 */
179778c4b59fSMichael Mueller 	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
179878c4b59fSMichael Mueller 	if (!vfacilities) {
1799ef50f7acSChristian Borntraeger 		kvm_exit();
1800ef50f7acSChristian Borntraeger 		return -ENOMEM;
1801ef50f7acSChristian Borntraeger 	}
180278c4b59fSMichael Mueller 	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
1803d208c79dSThomas Huth 	vfacilities[0] &= 0xff82fff3f4fc2000UL;
18047feb6bb8SMichael Mueller 	vfacilities[1] &= 0x005c000000000000UL;
1805ef50f7acSChristian Borntraeger 	return 0;
1806b0c632dbSHeiko Carstens }
1807b0c632dbSHeiko Carstens 
1808b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void)
1809b0c632dbSHeiko Carstens {
181078c4b59fSMichael Mueller 	free_page((unsigned long) vfacilities);
1811b0c632dbSHeiko Carstens 	kvm_exit();
1812b0c632dbSHeiko Carstens }
1813b0c632dbSHeiko Carstens 
1814b0c632dbSHeiko Carstens module_init(kvm_s390_init);
1815b0c632dbSHeiko Carstens module_exit(kvm_s390_exit);
1816566af940SCornelia Huck 
1817566af940SCornelia Huck /*
1818566af940SCornelia Huck  * Enable autoloading of the kvm module.
1819566af940SCornelia Huck  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
1820566af940SCornelia Huck  * since x86 takes a different approach.
1821566af940SCornelia Huck  */
1822566af940SCornelia Huck #include <linux/miscdevice.h>
1823566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR);
1824566af940SCornelia Huck MODULE_ALIAS("devname:kvm");
1825