xref: /openbmc/linux/arch/s390/kvm/kvm-s390.c (revision a6b7e459ff6d569227980f711664f927100c73a8)
1b0c632dbSHeiko Carstens /*
2a53c8fabSHeiko Carstens  * hosting zSeries kernel virtual machines
3b0c632dbSHeiko Carstens  *
4628eb9b8SChristian Ehrhardt  * Copyright IBM Corp. 2008, 2009
5b0c632dbSHeiko Carstens  *
6b0c632dbSHeiko Carstens  * This program is free software; you can redistribute it and/or modify
7b0c632dbSHeiko Carstens  * it under the terms of the GNU General Public License (version 2 only)
8b0c632dbSHeiko Carstens  * as published by the Free Software Foundation.
9b0c632dbSHeiko Carstens  *
10b0c632dbSHeiko Carstens  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11b0c632dbSHeiko Carstens  *               Christian Borntraeger <borntraeger@de.ibm.com>
12b0c632dbSHeiko Carstens  *               Heiko Carstens <heiko.carstens@de.ibm.com>
13628eb9b8SChristian Ehrhardt  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
1415f36ebdSJason J. Herne  *               Jason J. Herne <jjherne@us.ibm.com>
15b0c632dbSHeiko Carstens  */
16b0c632dbSHeiko Carstens 
17b0c632dbSHeiko Carstens #include <linux/compiler.h>
18b0c632dbSHeiko Carstens #include <linux/err.h>
19b0c632dbSHeiko Carstens #include <linux/fs.h>
20ca872302SChristian Borntraeger #include <linux/hrtimer.h>
21b0c632dbSHeiko Carstens #include <linux/init.h>
22b0c632dbSHeiko Carstens #include <linux/kvm.h>
23b0c632dbSHeiko Carstens #include <linux/kvm_host.h>
24b0c632dbSHeiko Carstens #include <linux/module.h>
25b0c632dbSHeiko Carstens #include <linux/slab.h>
26ba5c1e9bSCarsten Otte #include <linux/timer.h>
27cbb870c8SHeiko Carstens #include <asm/asm-offsets.h>
28b0c632dbSHeiko Carstens #include <asm/lowcore.h>
29b0c632dbSHeiko Carstens #include <asm/pgtable.h>
30f5daba1dSHeiko Carstens #include <asm/nmi.h>
31a0616cdeSDavid Howells #include <asm/switch_to.h>
3278c4b59fSMichael Mueller #include <asm/facility.h>
331526bf9cSChristian Borntraeger #include <asm/sclp.h>
348f2abe6aSChristian Borntraeger #include "kvm-s390.h"
35b0c632dbSHeiko Carstens #include "gaccess.h"
36b0c632dbSHeiko Carstens 
375786fffaSCornelia Huck #define CREATE_TRACE_POINTS
385786fffaSCornelia Huck #include "trace.h"
39ade38c31SCornelia Huck #include "trace-s390.h"
405786fffaSCornelia Huck 
41b0c632dbSHeiko Carstens #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
42b0c632dbSHeiko Carstens 
43b0c632dbSHeiko Carstens struct kvm_stats_debugfs_item debugfs_entries[] = {
44b0c632dbSHeiko Carstens 	{ "userspace_handled", VCPU_STAT(exit_userspace) },
450eaeafa1SChristian Borntraeger 	{ "exit_null", VCPU_STAT(exit_null) },
468f2abe6aSChristian Borntraeger 	{ "exit_validity", VCPU_STAT(exit_validity) },
478f2abe6aSChristian Borntraeger 	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
488f2abe6aSChristian Borntraeger 	{ "exit_external_request", VCPU_STAT(exit_external_request) },
498f2abe6aSChristian Borntraeger 	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
50ba5c1e9bSCarsten Otte 	{ "exit_instruction", VCPU_STAT(exit_instruction) },
51ba5c1e9bSCarsten Otte 	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
52ba5c1e9bSCarsten Otte 	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
53ce2e4f0bSDavid Hildenbrand 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
54f5e10b09SChristian Borntraeger 	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
55ba5c1e9bSCarsten Otte 	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
56aba07508SDavid Hildenbrand 	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
57aba07508SDavid Hildenbrand 	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
58ba5c1e9bSCarsten Otte 	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
597697e71fSChristian Ehrhardt 	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
60ba5c1e9bSCarsten Otte 	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
61ba5c1e9bSCarsten Otte 	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
62ba5c1e9bSCarsten Otte 	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
63ba5c1e9bSCarsten Otte 	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
64ba5c1e9bSCarsten Otte 	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
65ba5c1e9bSCarsten Otte 	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
66ba5c1e9bSCarsten Otte 	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
6769d0d3a3SChristian Borntraeger 	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
68453423dcSChristian Borntraeger 	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
69453423dcSChristian Borntraeger 	{ "instruction_spx", VCPU_STAT(instruction_spx) },
70453423dcSChristian Borntraeger 	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
71453423dcSChristian Borntraeger 	{ "instruction_stap", VCPU_STAT(instruction_stap) },
72453423dcSChristian Borntraeger 	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
738a242234SHeiko Carstens 	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
74453423dcSChristian Borntraeger 	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
75453423dcSChristian Borntraeger 	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
76b31288faSKonstantin Weitz 	{ "instruction_essa", VCPU_STAT(instruction_essa) },
77453423dcSChristian Borntraeger 	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
78453423dcSChristian Borntraeger 	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
79bb25b9baSChristian Borntraeger 	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
805288fbf0SChristian Borntraeger 	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
81bd59d3a4SCornelia Huck 	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
827697e71fSChristian Ehrhardt 	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
835288fbf0SChristian Borntraeger 	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
845288fbf0SChristian Borntraeger 	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
855288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
865288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
875288fbf0SChristian Borntraeger 	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
88388186bcSChristian Borntraeger 	{ "diagnose_10", VCPU_STAT(diagnose_10) },
89e28acfeaSChristian Borntraeger 	{ "diagnose_44", VCPU_STAT(diagnose_44) },
9041628d33SKonstantin Weitz 	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
91b0c632dbSHeiko Carstens 	{ NULL }
92b0c632dbSHeiko Carstens };
93b0c632dbSHeiko Carstens 
9478c4b59fSMichael Mueller unsigned long *vfacilities;
952c70fe44SChristian Borntraeger static struct gmap_notifier gmap_notifier;
96b0c632dbSHeiko Carstens 
9778c4b59fSMichael Mueller /* test availability of vfacility */
98280ef0f1SHeiko Carstens int test_vfacility(unsigned long nr)
9978c4b59fSMichael Mueller {
10078c4b59fSMichael Mueller 	return __test_facility(nr, (void *) vfacilities);
10178c4b59fSMichael Mueller }
10278c4b59fSMichael Mueller 
103b0c632dbSHeiko Carstens /* Section: not file related */
10413a34e06SRadim Krčmář int kvm_arch_hardware_enable(void)
105b0c632dbSHeiko Carstens {
106b0c632dbSHeiko Carstens 	/* every s390 is virtualization enabled ;-) */
10710474ae8SAlexander Graf 	return 0;
108b0c632dbSHeiko Carstens }
109b0c632dbSHeiko Carstens 
1102c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
1112c70fe44SChristian Borntraeger 
112b0c632dbSHeiko Carstens int kvm_arch_hardware_setup(void)
113b0c632dbSHeiko Carstens {
1142c70fe44SChristian Borntraeger 	gmap_notifier.notifier_call = kvm_gmap_notifier;
1152c70fe44SChristian Borntraeger 	gmap_register_ipte_notifier(&gmap_notifier);
116b0c632dbSHeiko Carstens 	return 0;
117b0c632dbSHeiko Carstens }
118b0c632dbSHeiko Carstens 
119b0c632dbSHeiko Carstens void kvm_arch_hardware_unsetup(void)
120b0c632dbSHeiko Carstens {
1212c70fe44SChristian Borntraeger 	gmap_unregister_ipte_notifier(&gmap_notifier);
122b0c632dbSHeiko Carstens }
123b0c632dbSHeiko Carstens 
124b0c632dbSHeiko Carstens int kvm_arch_init(void *opaque)
125b0c632dbSHeiko Carstens {
12684877d93SCornelia Huck 	/* Register floating interrupt controller interface. */
12784877d93SCornelia Huck 	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
128b0c632dbSHeiko Carstens }
129b0c632dbSHeiko Carstens 
130b0c632dbSHeiko Carstens /* Section: device related */
131b0c632dbSHeiko Carstens long kvm_arch_dev_ioctl(struct file *filp,
132b0c632dbSHeiko Carstens 			unsigned int ioctl, unsigned long arg)
133b0c632dbSHeiko Carstens {
134b0c632dbSHeiko Carstens 	if (ioctl == KVM_S390_ENABLE_SIE)
135b0c632dbSHeiko Carstens 		return s390_enable_sie();
136b0c632dbSHeiko Carstens 	return -EINVAL;
137b0c632dbSHeiko Carstens }
138b0c632dbSHeiko Carstens 
139784aa3d7SAlexander Graf int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
140b0c632dbSHeiko Carstens {
141d7b0b5ebSCarsten Otte 	int r;
142d7b0b5ebSCarsten Otte 
1432bd0ac4eSCarsten Otte 	switch (ext) {
144d7b0b5ebSCarsten Otte 	case KVM_CAP_S390_PSW:
145b6cf8788SChristian Borntraeger 	case KVM_CAP_S390_GMAP:
14652e16b18SChristian Borntraeger 	case KVM_CAP_SYNC_MMU:
1471efd0f59SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
1481efd0f59SCarsten Otte 	case KVM_CAP_S390_UCONTROL:
1491efd0f59SCarsten Otte #endif
1503c038e6bSDominik Dingel 	case KVM_CAP_ASYNC_PF:
15160b413c9SChristian Borntraeger 	case KVM_CAP_SYNC_REGS:
15214eebd91SCarsten Otte 	case KVM_CAP_ONE_REG:
153d6712df9SCornelia Huck 	case KVM_CAP_ENABLE_CAP:
154fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
155ebc32262SCornelia Huck 	case KVM_CAP_IRQFD:
15610ccaa1eSCornelia Huck 	case KVM_CAP_IOEVENTFD:
157c05c4186SJens Freimann 	case KVM_CAP_DEVICE_CTRL:
158d938dc55SCornelia Huck 	case KVM_CAP_ENABLE_CAP_VM:
15978599d90SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
160f2061656SDominik Dingel 	case KVM_CAP_VM_ATTRIBUTES:
1616352e4d2SDavid Hildenbrand 	case KVM_CAP_MP_STATE:
162d7b0b5ebSCarsten Otte 		r = 1;
163d7b0b5ebSCarsten Otte 		break;
164e726b1bdSChristian Borntraeger 	case KVM_CAP_NR_VCPUS:
165e726b1bdSChristian Borntraeger 	case KVM_CAP_MAX_VCPUS:
166e726b1bdSChristian Borntraeger 		r = KVM_MAX_VCPUS;
167e726b1bdSChristian Borntraeger 		break;
168e1e2e605SNick Wang 	case KVM_CAP_NR_MEMSLOTS:
169e1e2e605SNick Wang 		r = KVM_USER_MEM_SLOTS;
170e1e2e605SNick Wang 		break;
1711526bf9cSChristian Borntraeger 	case KVM_CAP_S390_COW:
172abf09bedSMartin Schwidefsky 		r = MACHINE_HAS_ESOP;
1731526bf9cSChristian Borntraeger 		break;
1742bd0ac4eSCarsten Otte 	default:
175d7b0b5ebSCarsten Otte 		r = 0;
176b0c632dbSHeiko Carstens 	}
177d7b0b5ebSCarsten Otte 	return r;
1782bd0ac4eSCarsten Otte }
179b0c632dbSHeiko Carstens 
18015f36ebdSJason J. Herne static void kvm_s390_sync_dirty_log(struct kvm *kvm,
18115f36ebdSJason J. Herne 					struct kvm_memory_slot *memslot)
18215f36ebdSJason J. Herne {
18315f36ebdSJason J. Herne 	gfn_t cur_gfn, last_gfn;
18415f36ebdSJason J. Herne 	unsigned long address;
18515f36ebdSJason J. Herne 	struct gmap *gmap = kvm->arch.gmap;
18615f36ebdSJason J. Herne 
18715f36ebdSJason J. Herne 	down_read(&gmap->mm->mmap_sem);
18815f36ebdSJason J. Herne 	/* Loop over all guest pages */
18915f36ebdSJason J. Herne 	last_gfn = memslot->base_gfn + memslot->npages;
19015f36ebdSJason J. Herne 	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
19115f36ebdSJason J. Herne 		address = gfn_to_hva_memslot(memslot, cur_gfn);
19215f36ebdSJason J. Herne 
19315f36ebdSJason J. Herne 		if (gmap_test_and_clear_dirty(address, gmap))
19415f36ebdSJason J. Herne 			mark_page_dirty(kvm, cur_gfn);
19515f36ebdSJason J. Herne 	}
19615f36ebdSJason J. Herne 	up_read(&gmap->mm->mmap_sem);
19715f36ebdSJason J. Herne }
19815f36ebdSJason J. Herne 
199b0c632dbSHeiko Carstens /* Section: vm related */
200b0c632dbSHeiko Carstens /*
201b0c632dbSHeiko Carstens  * Get (and clear) the dirty memory log for a memory slot.
202b0c632dbSHeiko Carstens  */
203b0c632dbSHeiko Carstens int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
204b0c632dbSHeiko Carstens 			       struct kvm_dirty_log *log)
205b0c632dbSHeiko Carstens {
20615f36ebdSJason J. Herne 	int r;
20715f36ebdSJason J. Herne 	unsigned long n;
20815f36ebdSJason J. Herne 	struct kvm_memory_slot *memslot;
20915f36ebdSJason J. Herne 	int is_dirty = 0;
21015f36ebdSJason J. Herne 
21115f36ebdSJason J. Herne 	mutex_lock(&kvm->slots_lock);
21215f36ebdSJason J. Herne 
21315f36ebdSJason J. Herne 	r = -EINVAL;
21415f36ebdSJason J. Herne 	if (log->slot >= KVM_USER_MEM_SLOTS)
21515f36ebdSJason J. Herne 		goto out;
21615f36ebdSJason J. Herne 
21715f36ebdSJason J. Herne 	memslot = id_to_memslot(kvm->memslots, log->slot);
21815f36ebdSJason J. Herne 	r = -ENOENT;
21915f36ebdSJason J. Herne 	if (!memslot->dirty_bitmap)
22015f36ebdSJason J. Herne 		goto out;
22115f36ebdSJason J. Herne 
22215f36ebdSJason J. Herne 	kvm_s390_sync_dirty_log(kvm, memslot);
22315f36ebdSJason J. Herne 	r = kvm_get_dirty_log(kvm, log, &is_dirty);
22415f36ebdSJason J. Herne 	if (r)
22515f36ebdSJason J. Herne 		goto out;
22615f36ebdSJason J. Herne 
22715f36ebdSJason J. Herne 	/* Clear the dirty log */
22815f36ebdSJason J. Herne 	if (is_dirty) {
22915f36ebdSJason J. Herne 		n = kvm_dirty_bitmap_bytes(memslot);
23015f36ebdSJason J. Herne 		memset(memslot->dirty_bitmap, 0, n);
23115f36ebdSJason J. Herne 	}
23215f36ebdSJason J. Herne 	r = 0;
23315f36ebdSJason J. Herne out:
23415f36ebdSJason J. Herne 	mutex_unlock(&kvm->slots_lock);
23515f36ebdSJason J. Herne 	return r;
236b0c632dbSHeiko Carstens }
237b0c632dbSHeiko Carstens 
238d938dc55SCornelia Huck static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
239d938dc55SCornelia Huck {
240d938dc55SCornelia Huck 	int r;
241d938dc55SCornelia Huck 
242d938dc55SCornelia Huck 	if (cap->flags)
243d938dc55SCornelia Huck 		return -EINVAL;
244d938dc55SCornelia Huck 
245d938dc55SCornelia Huck 	switch (cap->cap) {
24684223598SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
24784223598SCornelia Huck 		kvm->arch.use_irqchip = 1;
24884223598SCornelia Huck 		r = 0;
24984223598SCornelia Huck 		break;
250d938dc55SCornelia Huck 	default:
251d938dc55SCornelia Huck 		r = -EINVAL;
252d938dc55SCornelia Huck 		break;
253d938dc55SCornelia Huck 	}
254d938dc55SCornelia Huck 	return r;
255d938dc55SCornelia Huck }
256d938dc55SCornelia Huck 
2574f718eabSDominik Dingel static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
2584f718eabSDominik Dingel {
2594f718eabSDominik Dingel 	int ret;
2604f718eabSDominik Dingel 	unsigned int idx;
2614f718eabSDominik Dingel 	switch (attr->attr) {
2624f718eabSDominik Dingel 	case KVM_S390_VM_MEM_ENABLE_CMMA:
2634f718eabSDominik Dingel 		ret = -EBUSY;
2644f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
2654f718eabSDominik Dingel 		if (atomic_read(&kvm->online_vcpus) == 0) {
2664f718eabSDominik Dingel 			kvm->arch.use_cmma = 1;
2674f718eabSDominik Dingel 			ret = 0;
2684f718eabSDominik Dingel 		}
2694f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
2704f718eabSDominik Dingel 		break;
2714f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CLR_CMMA:
2724f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
2734f718eabSDominik Dingel 		idx = srcu_read_lock(&kvm->srcu);
2744f718eabSDominik Dingel 		page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
2754f718eabSDominik Dingel 		srcu_read_unlock(&kvm->srcu, idx);
2764f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
2774f718eabSDominik Dingel 		ret = 0;
2784f718eabSDominik Dingel 		break;
2794f718eabSDominik Dingel 	default:
2804f718eabSDominik Dingel 		ret = -ENXIO;
2814f718eabSDominik Dingel 		break;
2824f718eabSDominik Dingel 	}
2834f718eabSDominik Dingel 	return ret;
2844f718eabSDominik Dingel }
2854f718eabSDominik Dingel 
286f2061656SDominik Dingel static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
287f2061656SDominik Dingel {
288f2061656SDominik Dingel 	int ret;
289f2061656SDominik Dingel 
290f2061656SDominik Dingel 	switch (attr->group) {
2914f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
2924f718eabSDominik Dingel 		ret = kvm_s390_mem_control(kvm, attr);
2934f718eabSDominik Dingel 		break;
294f2061656SDominik Dingel 	default:
295f2061656SDominik Dingel 		ret = -ENXIO;
296f2061656SDominik Dingel 		break;
297f2061656SDominik Dingel 	}
298f2061656SDominik Dingel 
299f2061656SDominik Dingel 	return ret;
300f2061656SDominik Dingel }
301f2061656SDominik Dingel 
302f2061656SDominik Dingel static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
303f2061656SDominik Dingel {
304f2061656SDominik Dingel 	return -ENXIO;
305f2061656SDominik Dingel }
306f2061656SDominik Dingel 
307f2061656SDominik Dingel static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
308f2061656SDominik Dingel {
309f2061656SDominik Dingel 	int ret;
310f2061656SDominik Dingel 
311f2061656SDominik Dingel 	switch (attr->group) {
3124f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
3134f718eabSDominik Dingel 		switch (attr->attr) {
3144f718eabSDominik Dingel 		case KVM_S390_VM_MEM_ENABLE_CMMA:
3154f718eabSDominik Dingel 		case KVM_S390_VM_MEM_CLR_CMMA:
3164f718eabSDominik Dingel 			ret = 0;
3174f718eabSDominik Dingel 			break;
3184f718eabSDominik Dingel 		default:
3194f718eabSDominik Dingel 			ret = -ENXIO;
3204f718eabSDominik Dingel 			break;
3214f718eabSDominik Dingel 		}
3224f718eabSDominik Dingel 		break;
323f2061656SDominik Dingel 	default:
324f2061656SDominik Dingel 		ret = -ENXIO;
325f2061656SDominik Dingel 		break;
326f2061656SDominik Dingel 	}
327f2061656SDominik Dingel 
328f2061656SDominik Dingel 	return ret;
329f2061656SDominik Dingel }
330f2061656SDominik Dingel 
331b0c632dbSHeiko Carstens long kvm_arch_vm_ioctl(struct file *filp,
332b0c632dbSHeiko Carstens 		       unsigned int ioctl, unsigned long arg)
333b0c632dbSHeiko Carstens {
334b0c632dbSHeiko Carstens 	struct kvm *kvm = filp->private_data;
335b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
336f2061656SDominik Dingel 	struct kvm_device_attr attr;
337b0c632dbSHeiko Carstens 	int r;
338b0c632dbSHeiko Carstens 
339b0c632dbSHeiko Carstens 	switch (ioctl) {
340ba5c1e9bSCarsten Otte 	case KVM_S390_INTERRUPT: {
341ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
342ba5c1e9bSCarsten Otte 
343ba5c1e9bSCarsten Otte 		r = -EFAULT;
344ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
345ba5c1e9bSCarsten Otte 			break;
346ba5c1e9bSCarsten Otte 		r = kvm_s390_inject_vm(kvm, &s390int);
347ba5c1e9bSCarsten Otte 		break;
348ba5c1e9bSCarsten Otte 	}
349d938dc55SCornelia Huck 	case KVM_ENABLE_CAP: {
350d938dc55SCornelia Huck 		struct kvm_enable_cap cap;
351d938dc55SCornelia Huck 		r = -EFAULT;
352d938dc55SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
353d938dc55SCornelia Huck 			break;
354d938dc55SCornelia Huck 		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
355d938dc55SCornelia Huck 		break;
356d938dc55SCornelia Huck 	}
35784223598SCornelia Huck 	case KVM_CREATE_IRQCHIP: {
35884223598SCornelia Huck 		struct kvm_irq_routing_entry routing;
35984223598SCornelia Huck 
36084223598SCornelia Huck 		r = -EINVAL;
36184223598SCornelia Huck 		if (kvm->arch.use_irqchip) {
36284223598SCornelia Huck 			/* Set up dummy routing. */
36384223598SCornelia Huck 			memset(&routing, 0, sizeof(routing));
36484223598SCornelia Huck 			kvm_set_irq_routing(kvm, &routing, 0, 0);
36584223598SCornelia Huck 			r = 0;
36684223598SCornelia Huck 		}
36784223598SCornelia Huck 		break;
36884223598SCornelia Huck 	}
369f2061656SDominik Dingel 	case KVM_SET_DEVICE_ATTR: {
370f2061656SDominik Dingel 		r = -EFAULT;
371f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
372f2061656SDominik Dingel 			break;
373f2061656SDominik Dingel 		r = kvm_s390_vm_set_attr(kvm, &attr);
374f2061656SDominik Dingel 		break;
375f2061656SDominik Dingel 	}
376f2061656SDominik Dingel 	case KVM_GET_DEVICE_ATTR: {
377f2061656SDominik Dingel 		r = -EFAULT;
378f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
379f2061656SDominik Dingel 			break;
380f2061656SDominik Dingel 		r = kvm_s390_vm_get_attr(kvm, &attr);
381f2061656SDominik Dingel 		break;
382f2061656SDominik Dingel 	}
383f2061656SDominik Dingel 	case KVM_HAS_DEVICE_ATTR: {
384f2061656SDominik Dingel 		r = -EFAULT;
385f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
386f2061656SDominik Dingel 			break;
387f2061656SDominik Dingel 		r = kvm_s390_vm_has_attr(kvm, &attr);
388f2061656SDominik Dingel 		break;
389f2061656SDominik Dingel 	}
390b0c632dbSHeiko Carstens 	default:
391367e1319SAvi Kivity 		r = -ENOTTY;
392b0c632dbSHeiko Carstens 	}
393b0c632dbSHeiko Carstens 
394b0c632dbSHeiko Carstens 	return r;
395b0c632dbSHeiko Carstens }
396b0c632dbSHeiko Carstens 
3975102ee87STony Krowiak static int kvm_s390_crypto_init(struct kvm *kvm)
3985102ee87STony Krowiak {
3995102ee87STony Krowiak 	if (!test_vfacility(76))
4005102ee87STony Krowiak 		return 0;
4015102ee87STony Krowiak 
4025102ee87STony Krowiak 	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
4035102ee87STony Krowiak 					 GFP_KERNEL | GFP_DMA);
4045102ee87STony Krowiak 	if (!kvm->arch.crypto.crycb)
4055102ee87STony Krowiak 		return -ENOMEM;
4065102ee87STony Krowiak 
4075102ee87STony Krowiak 	kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
4085102ee87STony Krowiak 				  CRYCB_FORMAT1;
4095102ee87STony Krowiak 
4105102ee87STony Krowiak 	return 0;
4115102ee87STony Krowiak }
4125102ee87STony Krowiak 
413e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
414b0c632dbSHeiko Carstens {
415b0c632dbSHeiko Carstens 	int rc;
416b0c632dbSHeiko Carstens 	char debug_name[16];
417f6c137ffSChristian Borntraeger 	static unsigned long sca_offset;
418b0c632dbSHeiko Carstens 
419e08b9637SCarsten Otte 	rc = -EINVAL;
420e08b9637SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
421e08b9637SCarsten Otte 	if (type & ~KVM_VM_S390_UCONTROL)
422e08b9637SCarsten Otte 		goto out_err;
423e08b9637SCarsten Otte 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
424e08b9637SCarsten Otte 		goto out_err;
425e08b9637SCarsten Otte #else
426e08b9637SCarsten Otte 	if (type)
427e08b9637SCarsten Otte 		goto out_err;
428e08b9637SCarsten Otte #endif
429e08b9637SCarsten Otte 
430b0c632dbSHeiko Carstens 	rc = s390_enable_sie();
431b0c632dbSHeiko Carstens 	if (rc)
432d89f5effSJan Kiszka 		goto out_err;
433b0c632dbSHeiko Carstens 
434b290411aSCarsten Otte 	rc = -ENOMEM;
435b290411aSCarsten Otte 
436b0c632dbSHeiko Carstens 	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
437b0c632dbSHeiko Carstens 	if (!kvm->arch.sca)
438d89f5effSJan Kiszka 		goto out_err;
439f6c137ffSChristian Borntraeger 	spin_lock(&kvm_lock);
440f6c137ffSChristian Borntraeger 	sca_offset = (sca_offset + 16) & 0x7f0;
441f6c137ffSChristian Borntraeger 	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
442f6c137ffSChristian Borntraeger 	spin_unlock(&kvm_lock);
443b0c632dbSHeiko Carstens 
444b0c632dbSHeiko Carstens 	sprintf(debug_name, "kvm-%u", current->pid);
445b0c632dbSHeiko Carstens 
446b0c632dbSHeiko Carstens 	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
447b0c632dbSHeiko Carstens 	if (!kvm->arch.dbf)
448b0c632dbSHeiko Carstens 		goto out_nodbf;
449b0c632dbSHeiko Carstens 
4505102ee87STony Krowiak 	if (kvm_s390_crypto_init(kvm) < 0)
4515102ee87STony Krowiak 		goto out_crypto;
4525102ee87STony Krowiak 
453ba5c1e9bSCarsten Otte 	spin_lock_init(&kvm->arch.float_int.lock);
454ba5c1e9bSCarsten Otte 	INIT_LIST_HEAD(&kvm->arch.float_int.list);
4558a242234SHeiko Carstens 	init_waitqueue_head(&kvm->arch.ipte_wq);
456*a6b7e459SThomas Huth 	mutex_init(&kvm->arch.ipte_mutex);
457ba5c1e9bSCarsten Otte 
458b0c632dbSHeiko Carstens 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
459b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "%s", "vm created");
460b0c632dbSHeiko Carstens 
461e08b9637SCarsten Otte 	if (type & KVM_VM_S390_UCONTROL) {
462e08b9637SCarsten Otte 		kvm->arch.gmap = NULL;
463e08b9637SCarsten Otte 	} else {
4640349985aSChristian Borntraeger 		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
465598841caSCarsten Otte 		if (!kvm->arch.gmap)
466598841caSCarsten Otte 			goto out_nogmap;
4672c70fe44SChristian Borntraeger 		kvm->arch.gmap->private = kvm;
46824eb3a82SDominik Dingel 		kvm->arch.gmap->pfault_enabled = 0;
469e08b9637SCarsten Otte 	}
470fa6b7fe9SCornelia Huck 
471fa6b7fe9SCornelia Huck 	kvm->arch.css_support = 0;
47284223598SCornelia Huck 	kvm->arch.use_irqchip = 0;
473fa6b7fe9SCornelia Huck 
4748ad35755SDavid Hildenbrand 	spin_lock_init(&kvm->arch.start_stop_lock);
4758ad35755SDavid Hildenbrand 
476d89f5effSJan Kiszka 	return 0;
477598841caSCarsten Otte out_nogmap:
4785102ee87STony Krowiak 	kfree(kvm->arch.crypto.crycb);
4795102ee87STony Krowiak out_crypto:
480598841caSCarsten Otte 	debug_unregister(kvm->arch.dbf);
481b0c632dbSHeiko Carstens out_nodbf:
482b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
483d89f5effSJan Kiszka out_err:
484d89f5effSJan Kiszka 	return rc;
485b0c632dbSHeiko Carstens }
486b0c632dbSHeiko Carstens 
487d329c035SChristian Borntraeger void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
488d329c035SChristian Borntraeger {
489d329c035SChristian Borntraeger 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
490ade38c31SCornelia Huck 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
49167335e63SChristian Borntraeger 	kvm_s390_clear_local_irqs(vcpu);
4923c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
49358f9460bSCarsten Otte 	if (!kvm_is_ucontrol(vcpu->kvm)) {
49458f9460bSCarsten Otte 		clear_bit(63 - vcpu->vcpu_id,
49558f9460bSCarsten Otte 			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
496abf4a71eSCarsten Otte 		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
497abf4a71eSCarsten Otte 		    (__u64) vcpu->arch.sie_block)
498abf4a71eSCarsten Otte 			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
49958f9460bSCarsten Otte 	}
500abf4a71eSCarsten Otte 	smp_mb();
50127e0393fSCarsten Otte 
50227e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm))
50327e0393fSCarsten Otte 		gmap_free(vcpu->arch.gmap);
50427e0393fSCarsten Otte 
505b31605c1SDominik Dingel 	if (kvm_s390_cmma_enabled(vcpu->kvm))
506b31605c1SDominik Dingel 		kvm_s390_vcpu_unsetup_cmma(vcpu);
507d329c035SChristian Borntraeger 	free_page((unsigned long)(vcpu->arch.sie_block));
508b31288faSKonstantin Weitz 
5096692cef3SChristian Borntraeger 	kvm_vcpu_uninit(vcpu);
510b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
511d329c035SChristian Borntraeger }
512d329c035SChristian Borntraeger 
513d329c035SChristian Borntraeger static void kvm_free_vcpus(struct kvm *kvm)
514d329c035SChristian Borntraeger {
515d329c035SChristian Borntraeger 	unsigned int i;
516988a2caeSGleb Natapov 	struct kvm_vcpu *vcpu;
517d329c035SChristian Borntraeger 
518988a2caeSGleb Natapov 	kvm_for_each_vcpu(i, vcpu, kvm)
519988a2caeSGleb Natapov 		kvm_arch_vcpu_destroy(vcpu);
520988a2caeSGleb Natapov 
521988a2caeSGleb Natapov 	mutex_lock(&kvm->lock);
522988a2caeSGleb Natapov 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
523d329c035SChristian Borntraeger 		kvm->vcpus[i] = NULL;
524988a2caeSGleb Natapov 
525988a2caeSGleb Natapov 	atomic_set(&kvm->online_vcpus, 0);
526988a2caeSGleb Natapov 	mutex_unlock(&kvm->lock);
527d329c035SChristian Borntraeger }
528d329c035SChristian Borntraeger 
529b0c632dbSHeiko Carstens void kvm_arch_destroy_vm(struct kvm *kvm)
530b0c632dbSHeiko Carstens {
531d329c035SChristian Borntraeger 	kvm_free_vcpus(kvm);
532b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
533d329c035SChristian Borntraeger 	debug_unregister(kvm->arch.dbf);
5345102ee87STony Krowiak 	kfree(kvm->arch.crypto.crycb);
53527e0393fSCarsten Otte 	if (!kvm_is_ucontrol(kvm))
536598841caSCarsten Otte 		gmap_free(kvm->arch.gmap);
537841b91c5SCornelia Huck 	kvm_s390_destroy_adapters(kvm);
53867335e63SChristian Borntraeger 	kvm_s390_clear_float_irqs(kvm);
539b0c632dbSHeiko Carstens }
540b0c632dbSHeiko Carstens 
541b0c632dbSHeiko Carstens /* Section: vcpu related */
542b0c632dbSHeiko Carstens int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
543b0c632dbSHeiko Carstens {
5443c038e6bSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
5453c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
54627e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm)) {
547c6c956b8SMartin Schwidefsky 		vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
54827e0393fSCarsten Otte 		if (!vcpu->arch.gmap)
54927e0393fSCarsten Otte 			return -ENOMEM;
5502c70fe44SChristian Borntraeger 		vcpu->arch.gmap->private = vcpu->kvm;
55127e0393fSCarsten Otte 		return 0;
55227e0393fSCarsten Otte 	}
55327e0393fSCarsten Otte 
554598841caSCarsten Otte 	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
55559674c1aSChristian Borntraeger 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
55659674c1aSChristian Borntraeger 				    KVM_SYNC_GPRS |
5579eed0735SChristian Borntraeger 				    KVM_SYNC_ACRS |
558b028ee3eSDavid Hildenbrand 				    KVM_SYNC_CRS |
559b028ee3eSDavid Hildenbrand 				    KVM_SYNC_ARCH0 |
560b028ee3eSDavid Hildenbrand 				    KVM_SYNC_PFAULT;
561b0c632dbSHeiko Carstens 	return 0;
562b0c632dbSHeiko Carstens }
563b0c632dbSHeiko Carstens 
564b0c632dbSHeiko Carstens void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
565b0c632dbSHeiko Carstens {
5664725c860SMartin Schwidefsky 	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
5674725c860SMartin Schwidefsky 	save_fp_regs(vcpu->arch.host_fpregs.fprs);
568b0c632dbSHeiko Carstens 	save_access_regs(vcpu->arch.host_acrs);
5694725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
5704725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
57159674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
572480e5926SChristian Borntraeger 	gmap_enable(vcpu->arch.gmap);
5739e6dabefSCornelia Huck 	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
574b0c632dbSHeiko Carstens }
575b0c632dbSHeiko Carstens 
576b0c632dbSHeiko Carstens void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
577b0c632dbSHeiko Carstens {
5789e6dabefSCornelia Huck 	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
579480e5926SChristian Borntraeger 	gmap_disable(vcpu->arch.gmap);
5804725c860SMartin Schwidefsky 	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
5814725c860SMartin Schwidefsky 	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
58259674c1aSChristian Borntraeger 	save_access_regs(vcpu->run->s.regs.acrs);
5834725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
5844725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
585b0c632dbSHeiko Carstens 	restore_access_regs(vcpu->arch.host_acrs);
586b0c632dbSHeiko Carstens }
587b0c632dbSHeiko Carstens 
588b0c632dbSHeiko Carstens static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
589b0c632dbSHeiko Carstens {
590b0c632dbSHeiko Carstens 	/* this equals initial cpu reset in pop, but we don't switch to ESA */
591b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.mask = 0UL;
592b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.addr = 0UL;
5938d26cf7bSChristian Borntraeger 	kvm_s390_set_prefix(vcpu, 0);
594b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->cputm     = 0UL;
595b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->ckc       = 0UL;
596b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->todpr     = 0;
597b0c632dbSHeiko Carstens 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
598b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
599b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
600b0c632dbSHeiko Carstens 	vcpu->arch.guest_fpregs.fpc = 0;
601b0c632dbSHeiko Carstens 	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
602b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gbea = 1;
603672550fbSChristian Borntraeger 	vcpu->arch.sie_block->pp = 0;
6043c038e6bSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
6053c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
6066352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
6076852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
6082ed10cc1SJens Freimann 	kvm_s390_clear_local_irqs(vcpu);
609b0c632dbSHeiko Carstens }
610b0c632dbSHeiko Carstens 
61142897d86SMarcelo Tosatti int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
61242897d86SMarcelo Tosatti {
61342897d86SMarcelo Tosatti 	return 0;
61442897d86SMarcelo Tosatti }
61542897d86SMarcelo Tosatti 
6165102ee87STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
6175102ee87STony Krowiak {
6185102ee87STony Krowiak 	if (!test_vfacility(76))
6195102ee87STony Krowiak 		return;
6205102ee87STony Krowiak 
6215102ee87STony Krowiak 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
6225102ee87STony Krowiak }
6235102ee87STony Krowiak 
624b31605c1SDominik Dingel void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
625b31605c1SDominik Dingel {
626b31605c1SDominik Dingel 	free_page(vcpu->arch.sie_block->cbrlo);
627b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = 0;
628b31605c1SDominik Dingel }
629b31605c1SDominik Dingel 
630b31605c1SDominik Dingel int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
631b31605c1SDominik Dingel {
632b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
633b31605c1SDominik Dingel 	if (!vcpu->arch.sie_block->cbrlo)
634b31605c1SDominik Dingel 		return -ENOMEM;
635b31605c1SDominik Dingel 
636b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 |= 0x80;
637b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 &= ~0x08;
638b31605c1SDominik Dingel 	return 0;
639b31605c1SDominik Dingel }
640b31605c1SDominik Dingel 
641b0c632dbSHeiko Carstens int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
642b0c632dbSHeiko Carstens {
643b31605c1SDominik Dingel 	int rc = 0;
644b31288faSKonstantin Weitz 
6459e6dabefSCornelia Huck 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
6469e6dabefSCornelia Huck 						    CPUSTAT_SM |
64769d0d3a3SChristian Borntraeger 						    CPUSTAT_STOPPED |
64869d0d3a3SChristian Borntraeger 						    CPUSTAT_GED);
649fc34531dSChristian Borntraeger 	vcpu->arch.sie_block->ecb   = 6;
6507feb6bb8SMichael Mueller 	if (test_vfacility(50) && test_vfacility(73))
6517feb6bb8SMichael Mueller 		vcpu->arch.sie_block->ecb |= 0x10;
6527feb6bb8SMichael Mueller 
65369d0d3a3SChristian Borntraeger 	vcpu->arch.sie_block->ecb2  = 8;
6544953919fSDavid Hildenbrand 	vcpu->arch.sie_block->eca   = 0xD1002000U;
655217a4406SHeiko Carstens 	if (sclp_has_siif())
656217a4406SHeiko Carstens 		vcpu->arch.sie_block->eca |= 1;
65778c4b59fSMichael Mueller 	vcpu->arch.sie_block->fac   = (int) (long) vfacilities;
6585a5e6536SMatthew Rosato 	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
6595a5e6536SMatthew Rosato 				      ICTL_TPROT;
6605a5e6536SMatthew Rosato 
661b31605c1SDominik Dingel 	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
662b31605c1SDominik Dingel 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
663b31605c1SDominik Dingel 		if (rc)
664b31605c1SDominik Dingel 			return rc;
665b31288faSKonstantin Weitz 	}
666ca872302SChristian Borntraeger 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
667ca872302SChristian Borntraeger 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
668453423dcSChristian Borntraeger 	get_cpu_id(&vcpu->arch.cpu_id);
66992e6ecf3SChristian Borntraeger 	vcpu->arch.cpu_id.version = 0xff;
6705102ee87STony Krowiak 
6715102ee87STony Krowiak 	kvm_s390_vcpu_crypto_setup(vcpu);
6725102ee87STony Krowiak 
673b31605c1SDominik Dingel 	return rc;
674b0c632dbSHeiko Carstens }
675b0c632dbSHeiko Carstens 
676b0c632dbSHeiko Carstens struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
677b0c632dbSHeiko Carstens 				      unsigned int id)
678b0c632dbSHeiko Carstens {
6794d47555aSCarsten Otte 	struct kvm_vcpu *vcpu;
6807feb6bb8SMichael Mueller 	struct sie_page *sie_page;
6814d47555aSCarsten Otte 	int rc = -EINVAL;
682b0c632dbSHeiko Carstens 
6834d47555aSCarsten Otte 	if (id >= KVM_MAX_VCPUS)
6844d47555aSCarsten Otte 		goto out;
6854d47555aSCarsten Otte 
6864d47555aSCarsten Otte 	rc = -ENOMEM;
6874d47555aSCarsten Otte 
688b110feafSMichael Mueller 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
689b0c632dbSHeiko Carstens 	if (!vcpu)
6904d47555aSCarsten Otte 		goto out;
691b0c632dbSHeiko Carstens 
6927feb6bb8SMichael Mueller 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
6937feb6bb8SMichael Mueller 	if (!sie_page)
694b0c632dbSHeiko Carstens 		goto out_free_cpu;
695b0c632dbSHeiko Carstens 
6967feb6bb8SMichael Mueller 	vcpu->arch.sie_block = &sie_page->sie_block;
6977feb6bb8SMichael Mueller 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
6987feb6bb8SMichael Mueller 
699b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icpua = id;
70058f9460bSCarsten Otte 	if (!kvm_is_ucontrol(kvm)) {
70158f9460bSCarsten Otte 		if (!kvm->arch.sca) {
70258f9460bSCarsten Otte 			WARN_ON_ONCE(1);
70358f9460bSCarsten Otte 			goto out_free_cpu;
70458f9460bSCarsten Otte 		}
705abf4a71eSCarsten Otte 		if (!kvm->arch.sca->cpu[id].sda)
70658f9460bSCarsten Otte 			kvm->arch.sca->cpu[id].sda =
70758f9460bSCarsten Otte 				(__u64) vcpu->arch.sie_block;
70858f9460bSCarsten Otte 		vcpu->arch.sie_block->scaoh =
70958f9460bSCarsten Otte 			(__u32)(((__u64)kvm->arch.sca) >> 32);
710b0c632dbSHeiko Carstens 		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
711fc34531dSChristian Borntraeger 		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
71258f9460bSCarsten Otte 	}
713b0c632dbSHeiko Carstens 
714ba5c1e9bSCarsten Otte 	spin_lock_init(&vcpu->arch.local_int.lock);
715ba5c1e9bSCarsten Otte 	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
716ba5c1e9bSCarsten Otte 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
717d0321a24SChristian Borntraeger 	vcpu->arch.local_int.wq = &vcpu->wq;
7185288fbf0SChristian Borntraeger 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
719ba5c1e9bSCarsten Otte 
720b0c632dbSHeiko Carstens 	rc = kvm_vcpu_init(vcpu, kvm, id);
721b0c632dbSHeiko Carstens 	if (rc)
7227b06bf2fSWei Yongjun 		goto out_free_sie_block;
723b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
724b0c632dbSHeiko Carstens 		 vcpu->arch.sie_block);
725ade38c31SCornelia Huck 	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
726b0c632dbSHeiko Carstens 
727b0c632dbSHeiko Carstens 	return vcpu;
7287b06bf2fSWei Yongjun out_free_sie_block:
7297b06bf2fSWei Yongjun 	free_page((unsigned long)(vcpu->arch.sie_block));
730b0c632dbSHeiko Carstens out_free_cpu:
731b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
7324d47555aSCarsten Otte out:
733b0c632dbSHeiko Carstens 	return ERR_PTR(rc);
734b0c632dbSHeiko Carstens }
735b0c632dbSHeiko Carstens 
736b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
737b0c632dbSHeiko Carstens {
738f87618e8SMichael Mueller 	return kvm_cpu_has_interrupt(vcpu);
739b0c632dbSHeiko Carstens }
740b0c632dbSHeiko Carstens 
74149b99e1eSChristian Borntraeger void s390_vcpu_block(struct kvm_vcpu *vcpu)
74249b99e1eSChristian Borntraeger {
74349b99e1eSChristian Borntraeger 	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
74449b99e1eSChristian Borntraeger }
74549b99e1eSChristian Borntraeger 
74649b99e1eSChristian Borntraeger void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
74749b99e1eSChristian Borntraeger {
74849b99e1eSChristian Borntraeger 	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
74949b99e1eSChristian Borntraeger }
75049b99e1eSChristian Borntraeger 
75149b99e1eSChristian Borntraeger /*
75249b99e1eSChristian Borntraeger  * Kick a guest cpu out of SIE and wait until SIE is not running.
75349b99e1eSChristian Borntraeger  * If the CPU is not running (e.g. waiting as idle) the function will
75449b99e1eSChristian Borntraeger  * return immediately. */
75549b99e1eSChristian Borntraeger void exit_sie(struct kvm_vcpu *vcpu)
75649b99e1eSChristian Borntraeger {
75749b99e1eSChristian Borntraeger 	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
75849b99e1eSChristian Borntraeger 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
75949b99e1eSChristian Borntraeger 		cpu_relax();
76049b99e1eSChristian Borntraeger }
76149b99e1eSChristian Borntraeger 
76249b99e1eSChristian Borntraeger /* Kick a guest cpu out of SIE and prevent SIE-reentry */
76349b99e1eSChristian Borntraeger void exit_sie_sync(struct kvm_vcpu *vcpu)
76449b99e1eSChristian Borntraeger {
76549b99e1eSChristian Borntraeger 	s390_vcpu_block(vcpu);
76649b99e1eSChristian Borntraeger 	exit_sie(vcpu);
76749b99e1eSChristian Borntraeger }
76849b99e1eSChristian Borntraeger 
7692c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
7702c70fe44SChristian Borntraeger {
7712c70fe44SChristian Borntraeger 	int i;
7722c70fe44SChristian Borntraeger 	struct kvm *kvm = gmap->private;
7732c70fe44SChristian Borntraeger 	struct kvm_vcpu *vcpu;
7742c70fe44SChristian Borntraeger 
7752c70fe44SChristian Borntraeger 	kvm_for_each_vcpu(i, vcpu, kvm) {
7762c70fe44SChristian Borntraeger 		/* match against both prefix pages */
777fda902cbSMichael Mueller 		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
7782c70fe44SChristian Borntraeger 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
7792c70fe44SChristian Borntraeger 			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
7802c70fe44SChristian Borntraeger 			exit_sie_sync(vcpu);
7812c70fe44SChristian Borntraeger 		}
7822c70fe44SChristian Borntraeger 	}
7832c70fe44SChristian Borntraeger }
7842c70fe44SChristian Borntraeger 
785b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
786b6d33834SChristoffer Dall {
787b6d33834SChristoffer Dall 	/* kvm common code refers to this, but never calls it */
788b6d33834SChristoffer Dall 	BUG();
789b6d33834SChristoffer Dall 	return 0;
790b6d33834SChristoffer Dall }
791b6d33834SChristoffer Dall 
79214eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
79314eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
79414eebd91SCarsten Otte {
79514eebd91SCarsten Otte 	int r = -EINVAL;
79614eebd91SCarsten Otte 
79714eebd91SCarsten Otte 	switch (reg->id) {
79829b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
79929b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->todpr,
80029b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
80129b7c71bSCarsten Otte 		break;
80229b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
80329b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->epoch,
80429b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
80529b7c71bSCarsten Otte 		break;
80646a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
80746a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->cputm,
80846a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
80946a6dd1cSJason J. herne 		break;
81046a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
81146a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->ckc,
81246a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
81346a6dd1cSJason J. herne 		break;
814536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
815536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_token,
816536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
817536336c2SDominik Dingel 		break;
818536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
819536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_compare,
820536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
821536336c2SDominik Dingel 		break;
822536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
823536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_select,
824536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
825536336c2SDominik Dingel 		break;
826672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
827672550fbSChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->pp,
828672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
829672550fbSChristian Borntraeger 		break;
830afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
831afa45ff5SChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->gbea,
832afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
833afa45ff5SChristian Borntraeger 		break;
83414eebd91SCarsten Otte 	default:
83514eebd91SCarsten Otte 		break;
83614eebd91SCarsten Otte 	}
83714eebd91SCarsten Otte 
83814eebd91SCarsten Otte 	return r;
83914eebd91SCarsten Otte }
84014eebd91SCarsten Otte 
84114eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
84214eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
84314eebd91SCarsten Otte {
84414eebd91SCarsten Otte 	int r = -EINVAL;
84514eebd91SCarsten Otte 
84614eebd91SCarsten Otte 	switch (reg->id) {
84729b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
84829b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->todpr,
84929b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
85029b7c71bSCarsten Otte 		break;
85129b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
85229b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->epoch,
85329b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
85429b7c71bSCarsten Otte 		break;
85546a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
85646a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->cputm,
85746a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
85846a6dd1cSJason J. herne 		break;
85946a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
86046a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->ckc,
86146a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
86246a6dd1cSJason J. herne 		break;
863536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
864536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_token,
865536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
866536336c2SDominik Dingel 		break;
867536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
868536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_compare,
869536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
870536336c2SDominik Dingel 		break;
871536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
872536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_select,
873536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
874536336c2SDominik Dingel 		break;
875672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
876672550fbSChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->pp,
877672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
878672550fbSChristian Borntraeger 		break;
879afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
880afa45ff5SChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->gbea,
881afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
882afa45ff5SChristian Borntraeger 		break;
88314eebd91SCarsten Otte 	default:
88414eebd91SCarsten Otte 		break;
88514eebd91SCarsten Otte 	}
88614eebd91SCarsten Otte 
88714eebd91SCarsten Otte 	return r;
88814eebd91SCarsten Otte }
889b6d33834SChristoffer Dall 
890b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
891b0c632dbSHeiko Carstens {
892b0c632dbSHeiko Carstens 	kvm_s390_vcpu_initial_reset(vcpu);
893b0c632dbSHeiko Carstens 	return 0;
894b0c632dbSHeiko Carstens }
895b0c632dbSHeiko Carstens 
896b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
897b0c632dbSHeiko Carstens {
8985a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
899b0c632dbSHeiko Carstens 	return 0;
900b0c632dbSHeiko Carstens }
901b0c632dbSHeiko Carstens 
902b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
903b0c632dbSHeiko Carstens {
9045a32c1afSChristian Borntraeger 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
905b0c632dbSHeiko Carstens 	return 0;
906b0c632dbSHeiko Carstens }
907b0c632dbSHeiko Carstens 
908b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
909b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
910b0c632dbSHeiko Carstens {
91159674c1aSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
912b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
91359674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
914b0c632dbSHeiko Carstens 	return 0;
915b0c632dbSHeiko Carstens }
916b0c632dbSHeiko Carstens 
917b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
918b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
919b0c632dbSHeiko Carstens {
92059674c1aSChristian Borntraeger 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
921b0c632dbSHeiko Carstens 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
922b0c632dbSHeiko Carstens 	return 0;
923b0c632dbSHeiko Carstens }
924b0c632dbSHeiko Carstens 
925b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
926b0c632dbSHeiko Carstens {
9274725c860SMartin Schwidefsky 	if (test_fp_ctl(fpu->fpc))
9284725c860SMartin Schwidefsky 		return -EINVAL;
929b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
9304725c860SMartin Schwidefsky 	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
9314725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
9324725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
933b0c632dbSHeiko Carstens 	return 0;
934b0c632dbSHeiko Carstens }
935b0c632dbSHeiko Carstens 
936b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
937b0c632dbSHeiko Carstens {
938b0c632dbSHeiko Carstens 	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
939b0c632dbSHeiko Carstens 	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
940b0c632dbSHeiko Carstens 	return 0;
941b0c632dbSHeiko Carstens }
942b0c632dbSHeiko Carstens 
943b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
944b0c632dbSHeiko Carstens {
945b0c632dbSHeiko Carstens 	int rc = 0;
946b0c632dbSHeiko Carstens 
9477a42fdc2SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
948b0c632dbSHeiko Carstens 		rc = -EBUSY;
949d7b0b5ebSCarsten Otte 	else {
950d7b0b5ebSCarsten Otte 		vcpu->run->psw_mask = psw.mask;
951d7b0b5ebSCarsten Otte 		vcpu->run->psw_addr = psw.addr;
952d7b0b5ebSCarsten Otte 	}
953b0c632dbSHeiko Carstens 	return rc;
954b0c632dbSHeiko Carstens }
955b0c632dbSHeiko Carstens 
956b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
957b0c632dbSHeiko Carstens 				  struct kvm_translation *tr)
958b0c632dbSHeiko Carstens {
959b0c632dbSHeiko Carstens 	return -EINVAL; /* not implemented yet */
960b0c632dbSHeiko Carstens }
961b0c632dbSHeiko Carstens 
96227291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
96327291e21SDavid Hildenbrand 			      KVM_GUESTDBG_USE_HW_BP | \
96427291e21SDavid Hildenbrand 			      KVM_GUESTDBG_ENABLE)
96527291e21SDavid Hildenbrand 
966d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
967d0bfb940SJan Kiszka 					struct kvm_guest_debug *dbg)
968b0c632dbSHeiko Carstens {
96927291e21SDavid Hildenbrand 	int rc = 0;
97027291e21SDavid Hildenbrand 
97127291e21SDavid Hildenbrand 	vcpu->guest_debug = 0;
97227291e21SDavid Hildenbrand 	kvm_s390_clear_bp_data(vcpu);
97327291e21SDavid Hildenbrand 
9742de3bfc2SDavid Hildenbrand 	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
97527291e21SDavid Hildenbrand 		return -EINVAL;
97627291e21SDavid Hildenbrand 
97727291e21SDavid Hildenbrand 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
97827291e21SDavid Hildenbrand 		vcpu->guest_debug = dbg->control;
97927291e21SDavid Hildenbrand 		/* enforce guest PER */
98027291e21SDavid Hildenbrand 		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
98127291e21SDavid Hildenbrand 
98227291e21SDavid Hildenbrand 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
98327291e21SDavid Hildenbrand 			rc = kvm_s390_import_bp_data(vcpu, dbg);
98427291e21SDavid Hildenbrand 	} else {
98527291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
98627291e21SDavid Hildenbrand 		vcpu->arch.guestdbg.last_bp = 0;
98727291e21SDavid Hildenbrand 	}
98827291e21SDavid Hildenbrand 
98927291e21SDavid Hildenbrand 	if (rc) {
99027291e21SDavid Hildenbrand 		vcpu->guest_debug = 0;
99127291e21SDavid Hildenbrand 		kvm_s390_clear_bp_data(vcpu);
99227291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
99327291e21SDavid Hildenbrand 	}
99427291e21SDavid Hildenbrand 
99527291e21SDavid Hildenbrand 	return rc;
996b0c632dbSHeiko Carstens }
997b0c632dbSHeiko Carstens 
99862d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
99962d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
100062d9f0dbSMarcelo Tosatti {
10016352e4d2SDavid Hildenbrand 	/* CHECK_STOP and LOAD are not supported yet */
10026352e4d2SDavid Hildenbrand 	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
10036352e4d2SDavid Hildenbrand 				       KVM_MP_STATE_OPERATING;
100462d9f0dbSMarcelo Tosatti }
100562d9f0dbSMarcelo Tosatti 
100662d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
100762d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
100862d9f0dbSMarcelo Tosatti {
10096352e4d2SDavid Hildenbrand 	int rc = 0;
10106352e4d2SDavid Hildenbrand 
10116352e4d2SDavid Hildenbrand 	/* user space knows about this interface - let it control the state */
10126352e4d2SDavid Hildenbrand 	vcpu->kvm->arch.user_cpu_state_ctrl = 1;
10136352e4d2SDavid Hildenbrand 
10146352e4d2SDavid Hildenbrand 	switch (mp_state->mp_state) {
10156352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_STOPPED:
10166352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
10176352e4d2SDavid Hildenbrand 		break;
10186352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_OPERATING:
10196352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
10206352e4d2SDavid Hildenbrand 		break;
10216352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_LOAD:
10226352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_CHECK_STOP:
10236352e4d2SDavid Hildenbrand 		/* fall through - CHECK_STOP and LOAD are not supported yet */
10246352e4d2SDavid Hildenbrand 	default:
10256352e4d2SDavid Hildenbrand 		rc = -ENXIO;
10266352e4d2SDavid Hildenbrand 	}
10276352e4d2SDavid Hildenbrand 
10286352e4d2SDavid Hildenbrand 	return rc;
102962d9f0dbSMarcelo Tosatti }
103062d9f0dbSMarcelo Tosatti 
1031b31605c1SDominik Dingel bool kvm_s390_cmma_enabled(struct kvm *kvm)
1032b31605c1SDominik Dingel {
1033b31605c1SDominik Dingel 	if (!MACHINE_IS_LPAR)
1034b31605c1SDominik Dingel 		return false;
1035b31605c1SDominik Dingel 	/* only enable for z10 and later */
1036b31605c1SDominik Dingel 	if (!MACHINE_HAS_EDAT1)
1037b31605c1SDominik Dingel 		return false;
1038b31605c1SDominik Dingel 	if (!kvm->arch.use_cmma)
1039b31605c1SDominik Dingel 		return false;
1040b31605c1SDominik Dingel 	return true;
1041b31605c1SDominik Dingel }
1042b31605c1SDominik Dingel 
10438ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu)
10448ad35755SDavid Hildenbrand {
10458ad35755SDavid Hildenbrand 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
10468ad35755SDavid Hildenbrand }
10478ad35755SDavid Hildenbrand 
10482c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
10492c70fe44SChristian Borntraeger {
10508ad35755SDavid Hildenbrand retry:
10518ad35755SDavid Hildenbrand 	s390_vcpu_unblock(vcpu);
10522c70fe44SChristian Borntraeger 	/*
10532c70fe44SChristian Borntraeger 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
10542c70fe44SChristian Borntraeger 	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
10552c70fe44SChristian Borntraeger 	 * This ensures that the ipte instruction for this request has
10562c70fe44SChristian Borntraeger 	 * already finished. We might race against a second unmapper that
10572c70fe44SChristian Borntraeger 	 * wants to set the blocking bit. Lets just retry the request loop.
10582c70fe44SChristian Borntraeger 	 */
10598ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
10602c70fe44SChristian Borntraeger 		int rc;
10612c70fe44SChristian Borntraeger 		rc = gmap_ipte_notify(vcpu->arch.gmap,
1062fda902cbSMichael Mueller 				      kvm_s390_get_prefix(vcpu),
10632c70fe44SChristian Borntraeger 				      PAGE_SIZE * 2);
10642c70fe44SChristian Borntraeger 		if (rc)
10652c70fe44SChristian Borntraeger 			return rc;
10668ad35755SDavid Hildenbrand 		goto retry;
10672c70fe44SChristian Borntraeger 	}
10688ad35755SDavid Hildenbrand 
1069d3d692c8SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
1070d3d692c8SDavid Hildenbrand 		vcpu->arch.sie_block->ihcpu = 0xffff;
1071d3d692c8SDavid Hildenbrand 		goto retry;
1072d3d692c8SDavid Hildenbrand 	}
1073d3d692c8SDavid Hildenbrand 
10748ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
10758ad35755SDavid Hildenbrand 		if (!ibs_enabled(vcpu)) {
10768ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
10778ad35755SDavid Hildenbrand 			atomic_set_mask(CPUSTAT_IBS,
10788ad35755SDavid Hildenbrand 					&vcpu->arch.sie_block->cpuflags);
10798ad35755SDavid Hildenbrand 		}
10808ad35755SDavid Hildenbrand 		goto retry;
10818ad35755SDavid Hildenbrand 	}
10828ad35755SDavid Hildenbrand 
10838ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
10848ad35755SDavid Hildenbrand 		if (ibs_enabled(vcpu)) {
10858ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
10868ad35755SDavid Hildenbrand 			atomic_clear_mask(CPUSTAT_IBS,
10878ad35755SDavid Hildenbrand 					  &vcpu->arch.sie_block->cpuflags);
10888ad35755SDavid Hildenbrand 		}
10898ad35755SDavid Hildenbrand 		goto retry;
10908ad35755SDavid Hildenbrand 	}
10918ad35755SDavid Hildenbrand 
10920759d068SDavid Hildenbrand 	/* nothing to do, just clear the request */
10930759d068SDavid Hildenbrand 	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
10940759d068SDavid Hildenbrand 
10952c70fe44SChristian Borntraeger 	return 0;
10962c70fe44SChristian Borntraeger }
10972c70fe44SChristian Borntraeger 
1098fa576c58SThomas Huth /**
1099fa576c58SThomas Huth  * kvm_arch_fault_in_page - fault-in guest page if necessary
1100fa576c58SThomas Huth  * @vcpu: The corresponding virtual cpu
1101fa576c58SThomas Huth  * @gpa: Guest physical address
1102fa576c58SThomas Huth  * @writable: Whether the page should be writable or not
1103fa576c58SThomas Huth  *
1104fa576c58SThomas Huth  * Make sure that a guest page has been faulted-in on the host.
1105fa576c58SThomas Huth  *
1106fa576c58SThomas Huth  * Return: Zero on success, negative error code otherwise.
1107fa576c58SThomas Huth  */
1108fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
110924eb3a82SDominik Dingel {
1110527e30b4SMartin Schwidefsky 	return gmap_fault(vcpu->arch.gmap, gpa,
1111527e30b4SMartin Schwidefsky 			  writable ? FAULT_FLAG_WRITE : 0);
111224eb3a82SDominik Dingel }
111324eb3a82SDominik Dingel 
11143c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
11153c038e6bSDominik Dingel 				      unsigned long token)
11163c038e6bSDominik Dingel {
11173c038e6bSDominik Dingel 	struct kvm_s390_interrupt inti;
11183c038e6bSDominik Dingel 	inti.parm64 = token;
11193c038e6bSDominik Dingel 
11203c038e6bSDominik Dingel 	if (start_token) {
11213c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_INIT;
11223c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
11233c038e6bSDominik Dingel 	} else {
11243c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_DONE;
11253c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
11263c038e6bSDominik Dingel 	}
11273c038e6bSDominik Dingel }
11283c038e6bSDominik Dingel 
11293c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
11303c038e6bSDominik Dingel 				     struct kvm_async_pf *work)
11313c038e6bSDominik Dingel {
11323c038e6bSDominik Dingel 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
11333c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
11343c038e6bSDominik Dingel }
11353c038e6bSDominik Dingel 
11363c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
11373c038e6bSDominik Dingel 				 struct kvm_async_pf *work)
11383c038e6bSDominik Dingel {
11393c038e6bSDominik Dingel 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
11403c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
11413c038e6bSDominik Dingel }
11423c038e6bSDominik Dingel 
11433c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
11443c038e6bSDominik Dingel 			       struct kvm_async_pf *work)
11453c038e6bSDominik Dingel {
11463c038e6bSDominik Dingel 	/* s390 will always inject the page directly */
11473c038e6bSDominik Dingel }
11483c038e6bSDominik Dingel 
11493c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
11503c038e6bSDominik Dingel {
11513c038e6bSDominik Dingel 	/*
11523c038e6bSDominik Dingel 	 * s390 will always inject the page directly,
11533c038e6bSDominik Dingel 	 * but we still want check_async_completion to cleanup
11543c038e6bSDominik Dingel 	 */
11553c038e6bSDominik Dingel 	return true;
11563c038e6bSDominik Dingel }
11573c038e6bSDominik Dingel 
11583c038e6bSDominik Dingel static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
11593c038e6bSDominik Dingel {
11603c038e6bSDominik Dingel 	hva_t hva;
11613c038e6bSDominik Dingel 	struct kvm_arch_async_pf arch;
11623c038e6bSDominik Dingel 	int rc;
11633c038e6bSDominik Dingel 
11643c038e6bSDominik Dingel 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
11653c038e6bSDominik Dingel 		return 0;
11663c038e6bSDominik Dingel 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
11673c038e6bSDominik Dingel 	    vcpu->arch.pfault_compare)
11683c038e6bSDominik Dingel 		return 0;
11693c038e6bSDominik Dingel 	if (psw_extint_disabled(vcpu))
11703c038e6bSDominik Dingel 		return 0;
11713c038e6bSDominik Dingel 	if (kvm_cpu_has_interrupt(vcpu))
11723c038e6bSDominik Dingel 		return 0;
11733c038e6bSDominik Dingel 	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
11743c038e6bSDominik Dingel 		return 0;
11753c038e6bSDominik Dingel 	if (!vcpu->arch.gmap->pfault_enabled)
11763c038e6bSDominik Dingel 		return 0;
11773c038e6bSDominik Dingel 
117881480cc1SHeiko Carstens 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
117981480cc1SHeiko Carstens 	hva += current->thread.gmap_addr & ~PAGE_MASK;
118081480cc1SHeiko Carstens 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
11813c038e6bSDominik Dingel 		return 0;
11823c038e6bSDominik Dingel 
11833c038e6bSDominik Dingel 	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
11843c038e6bSDominik Dingel 	return rc;
11853c038e6bSDominik Dingel }
11863c038e6bSDominik Dingel 
11873fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu)
1188b0c632dbSHeiko Carstens {
11893fb4c40fSThomas Huth 	int rc, cpuflags;
1190e168bf8dSCarsten Otte 
11913c038e6bSDominik Dingel 	/*
11923c038e6bSDominik Dingel 	 * On s390 notifications for arriving pages will be delivered directly
11933c038e6bSDominik Dingel 	 * to the guest but the house keeping for completed pfaults is
11943c038e6bSDominik Dingel 	 * handled outside the worker.
11953c038e6bSDominik Dingel 	 */
11963c038e6bSDominik Dingel 	kvm_check_async_pf_completion(vcpu);
11973c038e6bSDominik Dingel 
11985a32c1afSChristian Borntraeger 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
1199b0c632dbSHeiko Carstens 
1200b0c632dbSHeiko Carstens 	if (need_resched())
1201b0c632dbSHeiko Carstens 		schedule();
1202b0c632dbSHeiko Carstens 
1203d3a73acbSMartin Schwidefsky 	if (test_cpu_flag(CIF_MCCK_PENDING))
120471cde587SChristian Borntraeger 		s390_handle_mcck();
120571cde587SChristian Borntraeger 
120679395031SJens Freimann 	if (!kvm_is_ucontrol(vcpu->kvm)) {
120779395031SJens Freimann 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
120879395031SJens Freimann 		if (rc)
120979395031SJens Freimann 			return rc;
121079395031SJens Freimann 	}
12110ff31867SCarsten Otte 
12122c70fe44SChristian Borntraeger 	rc = kvm_s390_handle_requests(vcpu);
12132c70fe44SChristian Borntraeger 	if (rc)
12142c70fe44SChristian Borntraeger 		return rc;
12152c70fe44SChristian Borntraeger 
121627291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu)) {
121727291e21SDavid Hildenbrand 		kvm_s390_backup_guest_per_regs(vcpu);
121827291e21SDavid Hildenbrand 		kvm_s390_patch_guest_per_regs(vcpu);
121927291e21SDavid Hildenbrand 	}
122027291e21SDavid Hildenbrand 
1221b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icptcode = 0;
12223fb4c40fSThomas Huth 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
12233fb4c40fSThomas Huth 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
12243fb4c40fSThomas Huth 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
12252b29a9fdSDominik Dingel 
12263fb4c40fSThomas Huth 	return 0;
12273fb4c40fSThomas Huth }
12283fb4c40fSThomas Huth 
12293fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
12303fb4c40fSThomas Huth {
123124eb3a82SDominik Dingel 	int rc = -1;
12322b29a9fdSDominik Dingel 
12332b29a9fdSDominik Dingel 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
12342b29a9fdSDominik Dingel 		   vcpu->arch.sie_block->icptcode);
12352b29a9fdSDominik Dingel 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
12362b29a9fdSDominik Dingel 
123727291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu))
123827291e21SDavid Hildenbrand 		kvm_s390_restore_guest_per_regs(vcpu);
123927291e21SDavid Hildenbrand 
12403fb4c40fSThomas Huth 	if (exit_reason >= 0) {
12417c470539SMartin Schwidefsky 		rc = 0;
1242210b1607SThomas Huth 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
1243210b1607SThomas Huth 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
1244210b1607SThomas Huth 		vcpu->run->s390_ucontrol.trans_exc_code =
1245210b1607SThomas Huth 						current->thread.gmap_addr;
1246210b1607SThomas Huth 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
1247210b1607SThomas Huth 		rc = -EREMOTE;
124824eb3a82SDominik Dingel 
124924eb3a82SDominik Dingel 	} else if (current->thread.gmap_pfault) {
12503c038e6bSDominik Dingel 		trace_kvm_s390_major_guest_pfault(vcpu);
125124eb3a82SDominik Dingel 		current->thread.gmap_pfault = 0;
1252fa576c58SThomas Huth 		if (kvm_arch_setup_async_pf(vcpu)) {
125324eb3a82SDominik Dingel 			rc = 0;
1254fa576c58SThomas Huth 		} else {
1255fa576c58SThomas Huth 			gpa_t gpa = current->thread.gmap_addr;
1256fa576c58SThomas Huth 			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
1257fa576c58SThomas Huth 		}
125824eb3a82SDominik Dingel 	}
125924eb3a82SDominik Dingel 
126024eb3a82SDominik Dingel 	if (rc == -1) {
1261699bde3bSChristian Borntraeger 		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
1262699bde3bSChristian Borntraeger 		trace_kvm_s390_sie_fault(vcpu);
1263699bde3bSChristian Borntraeger 		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
12641f0d0f09SCarsten Otte 	}
1265b0c632dbSHeiko Carstens 
12665a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
12673fb4c40fSThomas Huth 
1268a76ccff6SThomas Huth 	if (rc == 0) {
1269a76ccff6SThomas Huth 		if (kvm_is_ucontrol(vcpu->kvm))
12702955c83fSChristian Borntraeger 			/* Don't exit for host interrupts. */
12712955c83fSChristian Borntraeger 			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
1272a76ccff6SThomas Huth 		else
1273a76ccff6SThomas Huth 			rc = kvm_handle_sie_intercept(vcpu);
1274a76ccff6SThomas Huth 	}
1275a76ccff6SThomas Huth 
12763fb4c40fSThomas Huth 	return rc;
12773fb4c40fSThomas Huth }
12783fb4c40fSThomas Huth 
12793fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu)
12803fb4c40fSThomas Huth {
12813fb4c40fSThomas Huth 	int rc, exit_reason;
12823fb4c40fSThomas Huth 
1283800c1065SThomas Huth 	/*
1284800c1065SThomas Huth 	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
1285800c1065SThomas Huth 	 * ning the guest), so that memslots (and other stuff) are protected
1286800c1065SThomas Huth 	 */
1287800c1065SThomas Huth 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1288800c1065SThomas Huth 
1289a76ccff6SThomas Huth 	do {
12903fb4c40fSThomas Huth 		rc = vcpu_pre_run(vcpu);
12913fb4c40fSThomas Huth 		if (rc)
1292a76ccff6SThomas Huth 			break;
12933fb4c40fSThomas Huth 
1294800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
12953fb4c40fSThomas Huth 		/*
1296a76ccff6SThomas Huth 		 * As PF_VCPU will be used in fault handler, between
1297a76ccff6SThomas Huth 		 * guest_enter and guest_exit should be no uaccess.
12983fb4c40fSThomas Huth 		 */
12993fb4c40fSThomas Huth 		preempt_disable();
13003fb4c40fSThomas Huth 		kvm_guest_enter();
13013fb4c40fSThomas Huth 		preempt_enable();
1302a76ccff6SThomas Huth 		exit_reason = sie64a(vcpu->arch.sie_block,
1303a76ccff6SThomas Huth 				     vcpu->run->s.regs.gprs);
13043fb4c40fSThomas Huth 		kvm_guest_exit();
1305800c1065SThomas Huth 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
13063fb4c40fSThomas Huth 
13073fb4c40fSThomas Huth 		rc = vcpu_post_run(vcpu, exit_reason);
130827291e21SDavid Hildenbrand 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
13093fb4c40fSThomas Huth 
1310800c1065SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1311e168bf8dSCarsten Otte 	return rc;
1312b0c632dbSHeiko Carstens }
1313b0c632dbSHeiko Carstens 
1314b028ee3eSDavid Hildenbrand static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1315b028ee3eSDavid Hildenbrand {
1316b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
1317b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
1318b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
1319b028ee3eSDavid Hildenbrand 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
1320b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
1321b028ee3eSDavid Hildenbrand 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
1322d3d692c8SDavid Hildenbrand 		/* some control register changes require a tlb flush */
1323d3d692c8SDavid Hildenbrand 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1324b028ee3eSDavid Hildenbrand 	}
1325b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
1326b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
1327b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
1328b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
1329b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
1330b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
1331b028ee3eSDavid Hildenbrand 	}
1332b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
1333b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
1334b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
1335b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
1336b028ee3eSDavid Hildenbrand 	}
1337b028ee3eSDavid Hildenbrand 	kvm_run->kvm_dirty_regs = 0;
1338b028ee3eSDavid Hildenbrand }
1339b028ee3eSDavid Hildenbrand 
1340b028ee3eSDavid Hildenbrand static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1341b028ee3eSDavid Hildenbrand {
1342b028ee3eSDavid Hildenbrand 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
1343b028ee3eSDavid Hildenbrand 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
1344b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
1345b028ee3eSDavid Hildenbrand 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
1346b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
1347b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
1348b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
1349b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
1350b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
1351b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
1352b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
1353b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
1354b028ee3eSDavid Hildenbrand }
1355b028ee3eSDavid Hildenbrand 
1356b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1357b0c632dbSHeiko Carstens {
13588f2abe6aSChristian Borntraeger 	int rc;
1359b0c632dbSHeiko Carstens 	sigset_t sigsaved;
1360b0c632dbSHeiko Carstens 
136127291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu)) {
136227291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
136327291e21SDavid Hildenbrand 		return 0;
136427291e21SDavid Hildenbrand 	}
136527291e21SDavid Hildenbrand 
1366b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
1367b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1368b0c632dbSHeiko Carstens 
13696352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
13706852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
13716352e4d2SDavid Hildenbrand 	} else if (is_vcpu_stopped(vcpu)) {
13726352e4d2SDavid Hildenbrand 		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
13736352e4d2SDavid Hildenbrand 				   vcpu->vcpu_id);
13746352e4d2SDavid Hildenbrand 		return -EINVAL;
13756352e4d2SDavid Hildenbrand 	}
1376b0c632dbSHeiko Carstens 
1377b028ee3eSDavid Hildenbrand 	sync_regs(vcpu, kvm_run);
1378d7b0b5ebSCarsten Otte 
1379dab4079dSHeiko Carstens 	might_fault();
1380e168bf8dSCarsten Otte 	rc = __vcpu_run(vcpu);
13819ace903dSChristian Ehrhardt 
1382b1d16c49SChristian Ehrhardt 	if (signal_pending(current) && !rc) {
1383b1d16c49SChristian Ehrhardt 		kvm_run->exit_reason = KVM_EXIT_INTR;
13848f2abe6aSChristian Borntraeger 		rc = -EINTR;
1385b1d16c49SChristian Ehrhardt 	}
13868f2abe6aSChristian Borntraeger 
138727291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu) && !rc)  {
138827291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
138927291e21SDavid Hildenbrand 		rc = 0;
139027291e21SDavid Hildenbrand 	}
139127291e21SDavid Hildenbrand 
1392b8e660b8SHeiko Carstens 	if (rc == -EOPNOTSUPP) {
13938f2abe6aSChristian Borntraeger 		/* intercept cannot be handled in-kernel, prepare kvm-run */
13948f2abe6aSChristian Borntraeger 		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
13958f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
13968f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
13978f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
13988f2abe6aSChristian Borntraeger 		rc = 0;
13998f2abe6aSChristian Borntraeger 	}
14008f2abe6aSChristian Borntraeger 
14018f2abe6aSChristian Borntraeger 	if (rc == -EREMOTE) {
14028f2abe6aSChristian Borntraeger 		/* intercept was handled, but userspace support is needed
14038f2abe6aSChristian Borntraeger 		 * kvm_run has been prepared by the handler */
14048f2abe6aSChristian Borntraeger 		rc = 0;
14058f2abe6aSChristian Borntraeger 	}
14068f2abe6aSChristian Borntraeger 
1407b028ee3eSDavid Hildenbrand 	store_regs(vcpu, kvm_run);
1408d7b0b5ebSCarsten Otte 
1409b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
1410b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1411b0c632dbSHeiko Carstens 
1412b0c632dbSHeiko Carstens 	vcpu->stat.exit_userspace++;
14137e8e6ab4SHeiko Carstens 	return rc;
1414b0c632dbSHeiko Carstens }
1415b0c632dbSHeiko Carstens 
1416b0c632dbSHeiko Carstens /*
1417b0c632dbSHeiko Carstens  * store status at address
1418b0c632dbSHeiko Carstens  * we use have two special cases:
1419b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
1420b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
1421b0c632dbSHeiko Carstens  */
1422d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
1423b0c632dbSHeiko Carstens {
1424092670cdSCarsten Otte 	unsigned char archmode = 1;
1425fda902cbSMichael Mueller 	unsigned int px;
1426178bd789SThomas Huth 	u64 clkcomp;
1427d0bce605SHeiko Carstens 	int rc;
1428b0c632dbSHeiko Carstens 
1429d0bce605SHeiko Carstens 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
1430d0bce605SHeiko Carstens 		if (write_guest_abs(vcpu, 163, &archmode, 1))
1431b0c632dbSHeiko Carstens 			return -EFAULT;
1432d0bce605SHeiko Carstens 		gpa = SAVE_AREA_BASE;
1433d0bce605SHeiko Carstens 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
1434d0bce605SHeiko Carstens 		if (write_guest_real(vcpu, 163, &archmode, 1))
1435b0c632dbSHeiko Carstens 			return -EFAULT;
1436d0bce605SHeiko Carstens 		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
1437d0bce605SHeiko Carstens 	}
1438d0bce605SHeiko Carstens 	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
1439d0bce605SHeiko Carstens 			     vcpu->arch.guest_fpregs.fprs, 128);
1440d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
1441d0bce605SHeiko Carstens 			      vcpu->run->s.regs.gprs, 128);
1442d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
1443d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gpsw, 16);
1444fda902cbSMichael Mueller 	px = kvm_s390_get_prefix(vcpu);
1445d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
1446fda902cbSMichael Mueller 			      &px, 4);
1447d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu,
1448d0bce605SHeiko Carstens 			      gpa + offsetof(struct save_area, fp_ctrl_reg),
1449d0bce605SHeiko Carstens 			      &vcpu->arch.guest_fpregs.fpc, 4);
1450d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
1451d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->todpr, 4);
1452d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
1453d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->cputm, 8);
1454178bd789SThomas Huth 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
1455d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
1456d0bce605SHeiko Carstens 			      &clkcomp, 8);
1457d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
1458d0bce605SHeiko Carstens 			      &vcpu->run->s.regs.acrs, 64);
1459d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
1460d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gcr, 128);
1461d0bce605SHeiko Carstens 	return rc ? -EFAULT : 0;
1462b0c632dbSHeiko Carstens }
1463b0c632dbSHeiko Carstens 
1464e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
1465e879892cSThomas Huth {
1466e879892cSThomas Huth 	/*
1467e879892cSThomas Huth 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
1468e879892cSThomas Huth 	 * copying in vcpu load/put. Lets update our copies before we save
1469e879892cSThomas Huth 	 * it into the save area
1470e879892cSThomas Huth 	 */
1471e879892cSThomas Huth 	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
1472e879892cSThomas Huth 	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
1473e879892cSThomas Huth 	save_access_regs(vcpu->run->s.regs.acrs);
1474e879892cSThomas Huth 
1475e879892cSThomas Huth 	return kvm_s390_store_status_unloaded(vcpu, addr);
1476e879892cSThomas Huth }
1477e879892cSThomas Huth 
14788ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
14798ad35755SDavid Hildenbrand {
14808ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
14818ad35755SDavid Hildenbrand 	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
14828ad35755SDavid Hildenbrand 	exit_sie_sync(vcpu);
14838ad35755SDavid Hildenbrand }
14848ad35755SDavid Hildenbrand 
14858ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
14868ad35755SDavid Hildenbrand {
14878ad35755SDavid Hildenbrand 	unsigned int i;
14888ad35755SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
14898ad35755SDavid Hildenbrand 
14908ad35755SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
14918ad35755SDavid Hildenbrand 		__disable_ibs_on_vcpu(vcpu);
14928ad35755SDavid Hildenbrand 	}
14938ad35755SDavid Hildenbrand }
14948ad35755SDavid Hildenbrand 
14958ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
14968ad35755SDavid Hildenbrand {
14978ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
14988ad35755SDavid Hildenbrand 	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
14998ad35755SDavid Hildenbrand 	exit_sie_sync(vcpu);
15008ad35755SDavid Hildenbrand }
15018ad35755SDavid Hildenbrand 
15026852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
15036852d7b6SDavid Hildenbrand {
15048ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
15058ad35755SDavid Hildenbrand 
15068ad35755SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
15078ad35755SDavid Hildenbrand 		return;
15088ad35755SDavid Hildenbrand 
15096852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
15108ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
1511433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
15128ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
15138ad35755SDavid Hildenbrand 
15148ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
15158ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
15168ad35755SDavid Hildenbrand 			started_vcpus++;
15178ad35755SDavid Hildenbrand 	}
15188ad35755SDavid Hildenbrand 
15198ad35755SDavid Hildenbrand 	if (started_vcpus == 0) {
15208ad35755SDavid Hildenbrand 		/* we're the only active VCPU -> speed it up */
15218ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(vcpu);
15228ad35755SDavid Hildenbrand 	} else if (started_vcpus == 1) {
15238ad35755SDavid Hildenbrand 		/*
15248ad35755SDavid Hildenbrand 		 * As we are starting a second VCPU, we have to disable
15258ad35755SDavid Hildenbrand 		 * the IBS facility on all VCPUs to remove potentially
15268ad35755SDavid Hildenbrand 		 * oustanding ENABLE requests.
15278ad35755SDavid Hildenbrand 		 */
15288ad35755SDavid Hildenbrand 		__disable_ibs_on_all_vcpus(vcpu->kvm);
15298ad35755SDavid Hildenbrand 	}
15308ad35755SDavid Hildenbrand 
15316852d7b6SDavid Hildenbrand 	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
15328ad35755SDavid Hildenbrand 	/*
15338ad35755SDavid Hildenbrand 	 * Another VCPU might have used IBS while we were offline.
15348ad35755SDavid Hildenbrand 	 * Let's play safe and flush the VCPU at startup.
15358ad35755SDavid Hildenbrand 	 */
1536d3d692c8SDavid Hildenbrand 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1537433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
15388ad35755SDavid Hildenbrand 	return;
15396852d7b6SDavid Hildenbrand }
15406852d7b6SDavid Hildenbrand 
15416852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
15426852d7b6SDavid Hildenbrand {
15438ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
15448ad35755SDavid Hildenbrand 	struct kvm_vcpu *started_vcpu = NULL;
15458ad35755SDavid Hildenbrand 
15468ad35755SDavid Hildenbrand 	if (is_vcpu_stopped(vcpu))
15478ad35755SDavid Hildenbrand 		return;
15488ad35755SDavid Hildenbrand 
15496852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
15508ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
1551433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
15528ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
15538ad35755SDavid Hildenbrand 
155432f5ff63SDavid Hildenbrand 	/* Need to lock access to action_bits to avoid a SIGP race condition */
15554ae3c081SDavid Hildenbrand 	spin_lock(&vcpu->arch.local_int.lock);
15566852d7b6SDavid Hildenbrand 	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
155732f5ff63SDavid Hildenbrand 
155832f5ff63SDavid Hildenbrand 	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
155932f5ff63SDavid Hildenbrand 	vcpu->arch.local_int.action_bits &=
156032f5ff63SDavid Hildenbrand 				 ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
15614ae3c081SDavid Hildenbrand 	spin_unlock(&vcpu->arch.local_int.lock);
156232f5ff63SDavid Hildenbrand 
15638ad35755SDavid Hildenbrand 	__disable_ibs_on_vcpu(vcpu);
15648ad35755SDavid Hildenbrand 
15658ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
15668ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
15678ad35755SDavid Hildenbrand 			started_vcpus++;
15688ad35755SDavid Hildenbrand 			started_vcpu = vcpu->kvm->vcpus[i];
15698ad35755SDavid Hildenbrand 		}
15708ad35755SDavid Hildenbrand 	}
15718ad35755SDavid Hildenbrand 
15728ad35755SDavid Hildenbrand 	if (started_vcpus == 1) {
15738ad35755SDavid Hildenbrand 		/*
15748ad35755SDavid Hildenbrand 		 * As we only have one VCPU left, we want to enable the
15758ad35755SDavid Hildenbrand 		 * IBS facility for that VCPU to speed it up.
15768ad35755SDavid Hildenbrand 		 */
15778ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(started_vcpu);
15788ad35755SDavid Hildenbrand 	}
15798ad35755SDavid Hildenbrand 
1580433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
15818ad35755SDavid Hildenbrand 	return;
15826852d7b6SDavid Hildenbrand }
15836852d7b6SDavid Hildenbrand 
1584d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1585d6712df9SCornelia Huck 				     struct kvm_enable_cap *cap)
1586d6712df9SCornelia Huck {
1587d6712df9SCornelia Huck 	int r;
1588d6712df9SCornelia Huck 
1589d6712df9SCornelia Huck 	if (cap->flags)
1590d6712df9SCornelia Huck 		return -EINVAL;
1591d6712df9SCornelia Huck 
1592d6712df9SCornelia Huck 	switch (cap->cap) {
1593fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
1594fa6b7fe9SCornelia Huck 		if (!vcpu->kvm->arch.css_support) {
1595fa6b7fe9SCornelia Huck 			vcpu->kvm->arch.css_support = 1;
1596fa6b7fe9SCornelia Huck 			trace_kvm_s390_enable_css(vcpu->kvm);
1597fa6b7fe9SCornelia Huck 		}
1598fa6b7fe9SCornelia Huck 		r = 0;
1599fa6b7fe9SCornelia Huck 		break;
1600d6712df9SCornelia Huck 	default:
1601d6712df9SCornelia Huck 		r = -EINVAL;
1602d6712df9SCornelia Huck 		break;
1603d6712df9SCornelia Huck 	}
1604d6712df9SCornelia Huck 	return r;
1605d6712df9SCornelia Huck }
1606d6712df9SCornelia Huck 
1607b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp,
1608b0c632dbSHeiko Carstens 			 unsigned int ioctl, unsigned long arg)
1609b0c632dbSHeiko Carstens {
1610b0c632dbSHeiko Carstens 	struct kvm_vcpu *vcpu = filp->private_data;
1611b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
1612800c1065SThomas Huth 	int idx;
1613bc923cc9SAvi Kivity 	long r;
1614b0c632dbSHeiko Carstens 
161593736624SAvi Kivity 	switch (ioctl) {
161693736624SAvi Kivity 	case KVM_S390_INTERRUPT: {
1617ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
1618ba5c1e9bSCarsten Otte 
161993736624SAvi Kivity 		r = -EFAULT;
1620ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
162193736624SAvi Kivity 			break;
162293736624SAvi Kivity 		r = kvm_s390_inject_vcpu(vcpu, &s390int);
162393736624SAvi Kivity 		break;
1624ba5c1e9bSCarsten Otte 	}
1625b0c632dbSHeiko Carstens 	case KVM_S390_STORE_STATUS:
1626800c1065SThomas Huth 		idx = srcu_read_lock(&vcpu->kvm->srcu);
1627bc923cc9SAvi Kivity 		r = kvm_s390_vcpu_store_status(vcpu, arg);
1628800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
1629bc923cc9SAvi Kivity 		break;
1630b0c632dbSHeiko Carstens 	case KVM_S390_SET_INITIAL_PSW: {
1631b0c632dbSHeiko Carstens 		psw_t psw;
1632b0c632dbSHeiko Carstens 
1633bc923cc9SAvi Kivity 		r = -EFAULT;
1634b0c632dbSHeiko Carstens 		if (copy_from_user(&psw, argp, sizeof(psw)))
1635bc923cc9SAvi Kivity 			break;
1636bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
1637bc923cc9SAvi Kivity 		break;
1638b0c632dbSHeiko Carstens 	}
1639b0c632dbSHeiko Carstens 	case KVM_S390_INITIAL_RESET:
1640bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
1641bc923cc9SAvi Kivity 		break;
164214eebd91SCarsten Otte 	case KVM_SET_ONE_REG:
164314eebd91SCarsten Otte 	case KVM_GET_ONE_REG: {
164414eebd91SCarsten Otte 		struct kvm_one_reg reg;
164514eebd91SCarsten Otte 		r = -EFAULT;
164614eebd91SCarsten Otte 		if (copy_from_user(&reg, argp, sizeof(reg)))
164714eebd91SCarsten Otte 			break;
164814eebd91SCarsten Otte 		if (ioctl == KVM_SET_ONE_REG)
164914eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
165014eebd91SCarsten Otte 		else
165114eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
165214eebd91SCarsten Otte 		break;
165314eebd91SCarsten Otte 	}
165427e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
165527e0393fSCarsten Otte 	case KVM_S390_UCAS_MAP: {
165627e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
165727e0393fSCarsten Otte 
165827e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
165927e0393fSCarsten Otte 			r = -EFAULT;
166027e0393fSCarsten Otte 			break;
166127e0393fSCarsten Otte 		}
166227e0393fSCarsten Otte 
166327e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
166427e0393fSCarsten Otte 			r = -EINVAL;
166527e0393fSCarsten Otte 			break;
166627e0393fSCarsten Otte 		}
166727e0393fSCarsten Otte 
166827e0393fSCarsten Otte 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
166927e0393fSCarsten Otte 				     ucasmap.vcpu_addr, ucasmap.length);
167027e0393fSCarsten Otte 		break;
167127e0393fSCarsten Otte 	}
167227e0393fSCarsten Otte 	case KVM_S390_UCAS_UNMAP: {
167327e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
167427e0393fSCarsten Otte 
167527e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
167627e0393fSCarsten Otte 			r = -EFAULT;
167727e0393fSCarsten Otte 			break;
167827e0393fSCarsten Otte 		}
167927e0393fSCarsten Otte 
168027e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
168127e0393fSCarsten Otte 			r = -EINVAL;
168227e0393fSCarsten Otte 			break;
168327e0393fSCarsten Otte 		}
168427e0393fSCarsten Otte 
168527e0393fSCarsten Otte 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
168627e0393fSCarsten Otte 			ucasmap.length);
168727e0393fSCarsten Otte 		break;
168827e0393fSCarsten Otte 	}
168927e0393fSCarsten Otte #endif
1690ccc7910fSCarsten Otte 	case KVM_S390_VCPU_FAULT: {
1691527e30b4SMartin Schwidefsky 		r = gmap_fault(vcpu->arch.gmap, arg, 0);
1692ccc7910fSCarsten Otte 		break;
1693ccc7910fSCarsten Otte 	}
1694d6712df9SCornelia Huck 	case KVM_ENABLE_CAP:
1695d6712df9SCornelia Huck 	{
1696d6712df9SCornelia Huck 		struct kvm_enable_cap cap;
1697d6712df9SCornelia Huck 		r = -EFAULT;
1698d6712df9SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
1699d6712df9SCornelia Huck 			break;
1700d6712df9SCornelia Huck 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1701d6712df9SCornelia Huck 		break;
1702d6712df9SCornelia Huck 	}
1703b0c632dbSHeiko Carstens 	default:
17043e6afcf1SCarsten Otte 		r = -ENOTTY;
1705b0c632dbSHeiko Carstens 	}
1706bc923cc9SAvi Kivity 	return r;
1707b0c632dbSHeiko Carstens }
1708b0c632dbSHeiko Carstens 
17095b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
17105b1c1493SCarsten Otte {
17115b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
17125b1c1493SCarsten Otte 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
17135b1c1493SCarsten Otte 		 && (kvm_is_ucontrol(vcpu->kvm))) {
17145b1c1493SCarsten Otte 		vmf->page = virt_to_page(vcpu->arch.sie_block);
17155b1c1493SCarsten Otte 		get_page(vmf->page);
17165b1c1493SCarsten Otte 		return 0;
17175b1c1493SCarsten Otte 	}
17185b1c1493SCarsten Otte #endif
17195b1c1493SCarsten Otte 	return VM_FAULT_SIGBUS;
17205b1c1493SCarsten Otte }
17215b1c1493SCarsten Otte 
17225587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
17235587027cSAneesh Kumar K.V 			    unsigned long npages)
1724db3fe4ebSTakuya Yoshikawa {
1725db3fe4ebSTakuya Yoshikawa 	return 0;
1726db3fe4ebSTakuya Yoshikawa }
1727db3fe4ebSTakuya Yoshikawa 
1728b0c632dbSHeiko Carstens /* Section: memory related */
1729f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
1730f7784b8eSMarcelo Tosatti 				   struct kvm_memory_slot *memslot,
17317b6195a9STakuya Yoshikawa 				   struct kvm_userspace_memory_region *mem,
17327b6195a9STakuya Yoshikawa 				   enum kvm_mr_change change)
1733b0c632dbSHeiko Carstens {
1734dd2887e7SNick Wang 	/* A few sanity checks. We can have memory slots which have to be
1735dd2887e7SNick Wang 	   located/ended at a segment boundary (1MB). The memory in userland is
1736dd2887e7SNick Wang 	   ok to be fragmented into various different vmas. It is okay to mmap()
1737dd2887e7SNick Wang 	   and munmap() stuff in this slot after doing this call at any time */
1738b0c632dbSHeiko Carstens 
1739598841caSCarsten Otte 	if (mem->userspace_addr & 0xffffful)
1740b0c632dbSHeiko Carstens 		return -EINVAL;
1741b0c632dbSHeiko Carstens 
1742598841caSCarsten Otte 	if (mem->memory_size & 0xffffful)
1743b0c632dbSHeiko Carstens 		return -EINVAL;
1744b0c632dbSHeiko Carstens 
1745f7784b8eSMarcelo Tosatti 	return 0;
1746f7784b8eSMarcelo Tosatti }
1747f7784b8eSMarcelo Tosatti 
1748f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
1749f7784b8eSMarcelo Tosatti 				struct kvm_userspace_memory_region *mem,
17508482644aSTakuya Yoshikawa 				const struct kvm_memory_slot *old,
17518482644aSTakuya Yoshikawa 				enum kvm_mr_change change)
1752f7784b8eSMarcelo Tosatti {
1753f7850c92SCarsten Otte 	int rc;
1754f7784b8eSMarcelo Tosatti 
17552cef4debSChristian Borntraeger 	/* If the basics of the memslot do not change, we do not want
17562cef4debSChristian Borntraeger 	 * to update the gmap. Every update causes several unnecessary
17572cef4debSChristian Borntraeger 	 * segment translation exceptions. This is usually handled just
17582cef4debSChristian Borntraeger 	 * fine by the normal fault handler + gmap, but it will also
17592cef4debSChristian Borntraeger 	 * cause faults on the prefix page of running guest CPUs.
17602cef4debSChristian Borntraeger 	 */
17612cef4debSChristian Borntraeger 	if (old->userspace_addr == mem->userspace_addr &&
17622cef4debSChristian Borntraeger 	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
17632cef4debSChristian Borntraeger 	    old->npages * PAGE_SIZE == mem->memory_size)
17642cef4debSChristian Borntraeger 		return;
1765598841caSCarsten Otte 
1766598841caSCarsten Otte 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
1767598841caSCarsten Otte 		mem->guest_phys_addr, mem->memory_size);
1768598841caSCarsten Otte 	if (rc)
1769f7850c92SCarsten Otte 		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
1770598841caSCarsten Otte 	return;
1771b0c632dbSHeiko Carstens }
1772b0c632dbSHeiko Carstens 
1773b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
1774b0c632dbSHeiko Carstens {
1775ef50f7acSChristian Borntraeger 	int ret;
17760ee75beaSAvi Kivity 	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1777ef50f7acSChristian Borntraeger 	if (ret)
1778ef50f7acSChristian Borntraeger 		return ret;
1779ef50f7acSChristian Borntraeger 
1780ef50f7acSChristian Borntraeger 	/*
1781ef50f7acSChristian Borntraeger 	 * guests can ask for up to 255+1 double words, we need a full page
178225985edcSLucas De Marchi 	 * to hold the maximum amount of facilities. On the other hand, we
1783ef50f7acSChristian Borntraeger 	 * only set facilities that are known to work in KVM.
1784ef50f7acSChristian Borntraeger 	 */
178578c4b59fSMichael Mueller 	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
178678c4b59fSMichael Mueller 	if (!vfacilities) {
1787ef50f7acSChristian Borntraeger 		kvm_exit();
1788ef50f7acSChristian Borntraeger 		return -ENOMEM;
1789ef50f7acSChristian Borntraeger 	}
179078c4b59fSMichael Mueller 	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
17917be81a46SChristian Borntraeger 	vfacilities[0] &= 0xff82fffbf47c2000UL;
17927feb6bb8SMichael Mueller 	vfacilities[1] &= 0x005c000000000000UL;
1793ef50f7acSChristian Borntraeger 	return 0;
1794b0c632dbSHeiko Carstens }
1795b0c632dbSHeiko Carstens 
1796b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void)
1797b0c632dbSHeiko Carstens {
179878c4b59fSMichael Mueller 	free_page((unsigned long) vfacilities);
1799b0c632dbSHeiko Carstens 	kvm_exit();
1800b0c632dbSHeiko Carstens }
1801b0c632dbSHeiko Carstens 
1802b0c632dbSHeiko Carstens module_init(kvm_s390_init);
1803b0c632dbSHeiko Carstens module_exit(kvm_s390_exit);
1804566af940SCornelia Huck 
1805566af940SCornelia Huck /*
1806566af940SCornelia Huck  * Enable autoloading of the kvm module.
1807566af940SCornelia Huck  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
1808566af940SCornelia Huck  * since x86 takes a different approach.
1809566af940SCornelia Huck  */
1810566af940SCornelia Huck #include <linux/miscdevice.h>
1811566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR);
1812566af940SCornelia Huck MODULE_ALIAS("devname:kvm");
1813