xref: /openbmc/linux/arch/s390/kvm/kvm-s390.c (revision 9fbd80828cef1b1bba5a293609a021047bb86a7e)
1b0c632dbSHeiko Carstens /*
2a53c8fabSHeiko Carstens  * hosting zSeries kernel virtual machines
3b0c632dbSHeiko Carstens  *
4628eb9b8SChristian Ehrhardt  * Copyright IBM Corp. 2008, 2009
5b0c632dbSHeiko Carstens  *
6b0c632dbSHeiko Carstens  * This program is free software; you can redistribute it and/or modify
7b0c632dbSHeiko Carstens  * it under the terms of the GNU General Public License (version 2 only)
8b0c632dbSHeiko Carstens  * as published by the Free Software Foundation.
9b0c632dbSHeiko Carstens  *
10b0c632dbSHeiko Carstens  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11b0c632dbSHeiko Carstens  *               Christian Borntraeger <borntraeger@de.ibm.com>
12b0c632dbSHeiko Carstens  *               Heiko Carstens <heiko.carstens@de.ibm.com>
13628eb9b8SChristian Ehrhardt  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
1415f36ebdSJason J. Herne  *               Jason J. Herne <jjherne@us.ibm.com>
15b0c632dbSHeiko Carstens  */
16b0c632dbSHeiko Carstens 
17b0c632dbSHeiko Carstens #include <linux/compiler.h>
18b0c632dbSHeiko Carstens #include <linux/err.h>
19b0c632dbSHeiko Carstens #include <linux/fs.h>
20ca872302SChristian Borntraeger #include <linux/hrtimer.h>
21b0c632dbSHeiko Carstens #include <linux/init.h>
22b0c632dbSHeiko Carstens #include <linux/kvm.h>
23b0c632dbSHeiko Carstens #include <linux/kvm_host.h>
24b0c632dbSHeiko Carstens #include <linux/module.h>
25b0c632dbSHeiko Carstens #include <linux/slab.h>
26ba5c1e9bSCarsten Otte #include <linux/timer.h>
27cbb870c8SHeiko Carstens #include <asm/asm-offsets.h>
28b0c632dbSHeiko Carstens #include <asm/lowcore.h>
29b0c632dbSHeiko Carstens #include <asm/pgtable.h>
30f5daba1dSHeiko Carstens #include <asm/nmi.h>
31a0616cdeSDavid Howells #include <asm/switch_to.h>
3278c4b59fSMichael Mueller #include <asm/facility.h>
331526bf9cSChristian Borntraeger #include <asm/sclp.h>
348f2abe6aSChristian Borntraeger #include "kvm-s390.h"
35b0c632dbSHeiko Carstens #include "gaccess.h"
36b0c632dbSHeiko Carstens 
375786fffaSCornelia Huck #define CREATE_TRACE_POINTS
385786fffaSCornelia Huck #include "trace.h"
39ade38c31SCornelia Huck #include "trace-s390.h"
405786fffaSCornelia Huck 
41b0c632dbSHeiko Carstens #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
42b0c632dbSHeiko Carstens 
43b0c632dbSHeiko Carstens struct kvm_stats_debugfs_item debugfs_entries[] = {
44b0c632dbSHeiko Carstens 	{ "userspace_handled", VCPU_STAT(exit_userspace) },
450eaeafa1SChristian Borntraeger 	{ "exit_null", VCPU_STAT(exit_null) },
468f2abe6aSChristian Borntraeger 	{ "exit_validity", VCPU_STAT(exit_validity) },
478f2abe6aSChristian Borntraeger 	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
488f2abe6aSChristian Borntraeger 	{ "exit_external_request", VCPU_STAT(exit_external_request) },
498f2abe6aSChristian Borntraeger 	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
50ba5c1e9bSCarsten Otte 	{ "exit_instruction", VCPU_STAT(exit_instruction) },
51ba5c1e9bSCarsten Otte 	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
52ba5c1e9bSCarsten Otte 	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
53ce2e4f0bSDavid Hildenbrand 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
54f5e10b09SChristian Borntraeger 	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
55ba5c1e9bSCarsten Otte 	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
56aba07508SDavid Hildenbrand 	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
57aba07508SDavid Hildenbrand 	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
58ba5c1e9bSCarsten Otte 	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
597697e71fSChristian Ehrhardt 	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
60ba5c1e9bSCarsten Otte 	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
61ba5c1e9bSCarsten Otte 	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
62ba5c1e9bSCarsten Otte 	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
63ba5c1e9bSCarsten Otte 	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
64ba5c1e9bSCarsten Otte 	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
65ba5c1e9bSCarsten Otte 	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
66ba5c1e9bSCarsten Otte 	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
6769d0d3a3SChristian Borntraeger 	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
68453423dcSChristian Borntraeger 	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
69453423dcSChristian Borntraeger 	{ "instruction_spx", VCPU_STAT(instruction_spx) },
70453423dcSChristian Borntraeger 	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
71453423dcSChristian Borntraeger 	{ "instruction_stap", VCPU_STAT(instruction_stap) },
72453423dcSChristian Borntraeger 	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
738a242234SHeiko Carstens 	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
74453423dcSChristian Borntraeger 	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
75453423dcSChristian Borntraeger 	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
76b31288faSKonstantin Weitz 	{ "instruction_essa", VCPU_STAT(instruction_essa) },
77453423dcSChristian Borntraeger 	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
78453423dcSChristian Borntraeger 	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
79bb25b9baSChristian Borntraeger 	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
805288fbf0SChristian Borntraeger 	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
81bd59d3a4SCornelia Huck 	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
827697e71fSChristian Ehrhardt 	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
835288fbf0SChristian Borntraeger 	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
8442cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
8542cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
865288fbf0SChristian Borntraeger 	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
8742cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
8842cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
895288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
905288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
915288fbf0SChristian Borntraeger 	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
9242cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
9342cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
9442cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
95388186bcSChristian Borntraeger 	{ "diagnose_10", VCPU_STAT(diagnose_10) },
96e28acfeaSChristian Borntraeger 	{ "diagnose_44", VCPU_STAT(diagnose_44) },
9741628d33SKonstantin Weitz 	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
98b0c632dbSHeiko Carstens 	{ NULL }
99b0c632dbSHeiko Carstens };
100b0c632dbSHeiko Carstens 
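/*
 * Note on the table above: it feeds the generic KVM statistics code in
 * virt/kvm/kvm_main.c.  Each entry maps a counter in struct kvm_vcpu->stat
 * to a file of the same name under the kvm debugfs directory (typically
 * /sys/kernel/debug/kvm/), and the VCPU_STAT() macro supplies the field
 * offset together with the KVM_STAT_VCPU type tag.
 */
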
10178c4b59fSMichael Mueller unsigned long *vfacilities;
1022c70fe44SChristian Borntraeger static struct gmap_notifier gmap_notifier;
103b0c632dbSHeiko Carstens 
10478c4b59fSMichael Mueller /* test availability of vfacility */
105280ef0f1SHeiko Carstens int test_vfacility(unsigned long nr)
10678c4b59fSMichael Mueller {
10778c4b59fSMichael Mueller 	return __test_facility(nr, (void *) vfacilities);
10878c4b59fSMichael Mueller }
10978c4b59fSMichael Mueller 
110b0c632dbSHeiko Carstens /* Section: not file related */
11113a34e06SRadim Krčmář int kvm_arch_hardware_enable(void)
112b0c632dbSHeiko Carstens {
113b0c632dbSHeiko Carstens 	/* every s390 is virtualization enabled ;-) */
11410474ae8SAlexander Graf 	return 0;
115b0c632dbSHeiko Carstens }
116b0c632dbSHeiko Carstens 
1172c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
1182c70fe44SChristian Borntraeger 
119b0c632dbSHeiko Carstens int kvm_arch_hardware_setup(void)
120b0c632dbSHeiko Carstens {
1212c70fe44SChristian Borntraeger 	gmap_notifier.notifier_call = kvm_gmap_notifier;
1222c70fe44SChristian Borntraeger 	gmap_register_ipte_notifier(&gmap_notifier);
123b0c632dbSHeiko Carstens 	return 0;
124b0c632dbSHeiko Carstens }
125b0c632dbSHeiko Carstens 
126b0c632dbSHeiko Carstens void kvm_arch_hardware_unsetup(void)
127b0c632dbSHeiko Carstens {
1282c70fe44SChristian Borntraeger 	gmap_unregister_ipte_notifier(&gmap_notifier);
129b0c632dbSHeiko Carstens }
130b0c632dbSHeiko Carstens 
131b0c632dbSHeiko Carstens int kvm_arch_init(void *opaque)
132b0c632dbSHeiko Carstens {
13384877d93SCornelia Huck 	/* Register floating interrupt controller interface. */
13484877d93SCornelia Huck 	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
135b0c632dbSHeiko Carstens }
136b0c632dbSHeiko Carstens 
137b0c632dbSHeiko Carstens /* Section: device related */
138b0c632dbSHeiko Carstens long kvm_arch_dev_ioctl(struct file *filp,
139b0c632dbSHeiko Carstens 			unsigned int ioctl, unsigned long arg)
140b0c632dbSHeiko Carstens {
141b0c632dbSHeiko Carstens 	if (ioctl == KVM_S390_ENABLE_SIE)
142b0c632dbSHeiko Carstens 		return s390_enable_sie();
143b0c632dbSHeiko Carstens 	return -EINVAL;
144b0c632dbSHeiko Carstens }
145b0c632dbSHeiko Carstens 
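/*
 * Illustrative userspace sketch of the KVM_S390_ENABLE_SIE system ioctl
 * handled above.  The helper name is hypothetical and error handling is
 * minimal; the block is kept under #if 0 so it is clearly an example, not
 * part of this file.
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_sie_example(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);

	if (kvm_fd < 0)
		return -1;
	/* no argument; s390_enable_sie() returns 0 once SIE is usable */
	return ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0);
}
#endif
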
146784aa3d7SAlexander Graf int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
147b0c632dbSHeiko Carstens {
148d7b0b5ebSCarsten Otte 	int r;
149d7b0b5ebSCarsten Otte 
1502bd0ac4eSCarsten Otte 	switch (ext) {
151d7b0b5ebSCarsten Otte 	case KVM_CAP_S390_PSW:
152b6cf8788SChristian Borntraeger 	case KVM_CAP_S390_GMAP:
15352e16b18SChristian Borntraeger 	case KVM_CAP_SYNC_MMU:
1541efd0f59SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
1551efd0f59SCarsten Otte 	case KVM_CAP_S390_UCONTROL:
1561efd0f59SCarsten Otte #endif
1573c038e6bSDominik Dingel 	case KVM_CAP_ASYNC_PF:
15860b413c9SChristian Borntraeger 	case KVM_CAP_SYNC_REGS:
15914eebd91SCarsten Otte 	case KVM_CAP_ONE_REG:
160d6712df9SCornelia Huck 	case KVM_CAP_ENABLE_CAP:
161fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
162ebc32262SCornelia Huck 	case KVM_CAP_IRQFD:
16310ccaa1eSCornelia Huck 	case KVM_CAP_IOEVENTFD:
164c05c4186SJens Freimann 	case KVM_CAP_DEVICE_CTRL:
165d938dc55SCornelia Huck 	case KVM_CAP_ENABLE_CAP_VM:
16678599d90SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
167f2061656SDominik Dingel 	case KVM_CAP_VM_ATTRIBUTES:
1686352e4d2SDavid Hildenbrand 	case KVM_CAP_MP_STATE:
169d7b0b5ebSCarsten Otte 		r = 1;
170d7b0b5ebSCarsten Otte 		break;
171e726b1bdSChristian Borntraeger 	case KVM_CAP_NR_VCPUS:
172e726b1bdSChristian Borntraeger 	case KVM_CAP_MAX_VCPUS:
173e726b1bdSChristian Borntraeger 		r = KVM_MAX_VCPUS;
174e726b1bdSChristian Borntraeger 		break;
175e1e2e605SNick Wang 	case KVM_CAP_NR_MEMSLOTS:
176e1e2e605SNick Wang 		r = KVM_USER_MEM_SLOTS;
177e1e2e605SNick Wang 		break;
1781526bf9cSChristian Borntraeger 	case KVM_CAP_S390_COW:
179abf09bedSMartin Schwidefsky 		r = MACHINE_HAS_ESOP;
1801526bf9cSChristian Borntraeger 		break;
1812bd0ac4eSCarsten Otte 	default:
182d7b0b5ebSCarsten Otte 		r = 0;
183b0c632dbSHeiko Carstens 	}
184d7b0b5ebSCarsten Otte 	return r;
1852bd0ac4eSCarsten Otte }
186b0c632dbSHeiko Carstens 
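/*
 * Illustrative sketch of how userspace consumes the handler above via the
 * KVM_CHECK_EXTENSION ioctl (on the /dev/kvm fd, or on a VM fd where
 * KVM_CAP_CHECK_EXTENSION_VM is available).  The helper name is
 * hypothetical; it needs <sys/ioctl.h> and <linux/kvm.h>.
 */
#if 0
/* returns 0 if the capability is absent, 1 or a count otherwise */
static int has_cap_example(int kvm_fd, long cap)
{
	return ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap); /* e.g. KVM_CAP_S390_COW */
}
#endif
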
18715f36ebdSJason J. Herne static void kvm_s390_sync_dirty_log(struct kvm *kvm,
18815f36ebdSJason J. Herne 					struct kvm_memory_slot *memslot)
18915f36ebdSJason J. Herne {
19015f36ebdSJason J. Herne 	gfn_t cur_gfn, last_gfn;
19115f36ebdSJason J. Herne 	unsigned long address;
19215f36ebdSJason J. Herne 	struct gmap *gmap = kvm->arch.gmap;
19315f36ebdSJason J. Herne 
19415f36ebdSJason J. Herne 	down_read(&gmap->mm->mmap_sem);
19515f36ebdSJason J. Herne 	/* Loop over all guest pages */
19615f36ebdSJason J. Herne 	last_gfn = memslot->base_gfn + memslot->npages;
19715f36ebdSJason J. Herne 	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
19815f36ebdSJason J. Herne 		address = gfn_to_hva_memslot(memslot, cur_gfn);
19915f36ebdSJason J. Herne 
20015f36ebdSJason J. Herne 		if (gmap_test_and_clear_dirty(address, gmap))
20115f36ebdSJason J. Herne 			mark_page_dirty(kvm, cur_gfn);
20215f36ebdSJason J. Herne 	}
20315f36ebdSJason J. Herne 	up_read(&gmap->mm->mmap_sem);
20415f36ebdSJason J. Herne }
20515f36ebdSJason J. Herne 
206b0c632dbSHeiko Carstens /* Section: vm related */
207b0c632dbSHeiko Carstens /*
208b0c632dbSHeiko Carstens  * Get (and clear) the dirty memory log for a memory slot.
209b0c632dbSHeiko Carstens  */
210b0c632dbSHeiko Carstens int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
211b0c632dbSHeiko Carstens 			       struct kvm_dirty_log *log)
212b0c632dbSHeiko Carstens {
21315f36ebdSJason J. Herne 	int r;
21415f36ebdSJason J. Herne 	unsigned long n;
21515f36ebdSJason J. Herne 	struct kvm_memory_slot *memslot;
21615f36ebdSJason J. Herne 	int is_dirty = 0;
21715f36ebdSJason J. Herne 
21815f36ebdSJason J. Herne 	mutex_lock(&kvm->slots_lock);
21915f36ebdSJason J. Herne 
22015f36ebdSJason J. Herne 	r = -EINVAL;
22115f36ebdSJason J. Herne 	if (log->slot >= KVM_USER_MEM_SLOTS)
22215f36ebdSJason J. Herne 		goto out;
22315f36ebdSJason J. Herne 
22415f36ebdSJason J. Herne 	memslot = id_to_memslot(kvm->memslots, log->slot);
22515f36ebdSJason J. Herne 	r = -ENOENT;
22615f36ebdSJason J. Herne 	if (!memslot->dirty_bitmap)
22715f36ebdSJason J. Herne 		goto out;
22815f36ebdSJason J. Herne 
22915f36ebdSJason J. Herne 	kvm_s390_sync_dirty_log(kvm, memslot);
23015f36ebdSJason J. Herne 	r = kvm_get_dirty_log(kvm, log, &is_dirty);
23115f36ebdSJason J. Herne 	if (r)
23215f36ebdSJason J. Herne 		goto out;
23315f36ebdSJason J. Herne 
23415f36ebdSJason J. Herne 	/* Clear the dirty log */
23515f36ebdSJason J. Herne 	if (is_dirty) {
23615f36ebdSJason J. Herne 		n = kvm_dirty_bitmap_bytes(memslot);
23715f36ebdSJason J. Herne 		memset(memslot->dirty_bitmap, 0, n);
23815f36ebdSJason J. Herne 	}
23915f36ebdSJason J. Herne 	r = 0;
24015f36ebdSJason J. Herne out:
24115f36ebdSJason J. Herne 	mutex_unlock(&kvm->slots_lock);
24215f36ebdSJason J. Herne 	return r;
243b0c632dbSHeiko Carstens }
244b0c632dbSHeiko Carstens 
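/*
 * Illustrative userspace counterpart of the handler above: the
 * KVM_GET_DIRTY_LOG VM ioctl fills (and the handler then clears) a bitmap
 * with one bit per guest page of the slot, sized as in
 * kvm_dirty_bitmap_bytes(), i.e. rounded up to a multiple of 64 bits.
 * The helper name is hypothetical; needs <sys/ioctl.h> and <linux/kvm.h>.
 */
#if 0
static int get_dirty_log_example(int vm_fd, __u32 slot, void *bitmap)
{
	struct kvm_dirty_log log = { .slot = slot };

	/* caller allocated bitmap with ((npages + 63) / 64) * 8 bytes */
	log.dirty_bitmap = bitmap;
	return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
}
#endif
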
245d938dc55SCornelia Huck static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
246d938dc55SCornelia Huck {
247d938dc55SCornelia Huck 	int r;
248d938dc55SCornelia Huck 
249d938dc55SCornelia Huck 	if (cap->flags)
250d938dc55SCornelia Huck 		return -EINVAL;
251d938dc55SCornelia Huck 
252d938dc55SCornelia Huck 	switch (cap->cap) {
25384223598SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
25484223598SCornelia Huck 		kvm->arch.use_irqchip = 1;
25584223598SCornelia Huck 		r = 0;
25684223598SCornelia Huck 		break;
257d938dc55SCornelia Huck 	default:
258d938dc55SCornelia Huck 		r = -EINVAL;
259d938dc55SCornelia Huck 		break;
260d938dc55SCornelia Huck 	}
261d938dc55SCornelia Huck 	return r;
262d938dc55SCornelia Huck }
263d938dc55SCornelia Huck 
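/*
 * Illustrative sketch of enabling the in-kernel interrupt controller from
 * userspace through the KVM_ENABLE_CAP VM ioctl handled above; cap.flags
 * must stay zero or the handler returns -EINVAL.  The helper name is
 * hypothetical; needs <sys/ioctl.h> and <linux/kvm.h>.
 */
#if 0
static int enable_irqchip_example(int vm_fd)
{
	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_IRQCHIP };

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}
#endif
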
2648c0a7ce6SDominik Dingel static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
2658c0a7ce6SDominik Dingel {
2668c0a7ce6SDominik Dingel 	int ret;
2678c0a7ce6SDominik Dingel 
2688c0a7ce6SDominik Dingel 	switch (attr->attr) {
2698c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE:
2708c0a7ce6SDominik Dingel 		ret = 0;
2718c0a7ce6SDominik Dingel 		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
2728c0a7ce6SDominik Dingel 			ret = -EFAULT;
2738c0a7ce6SDominik Dingel 		break;
2748c0a7ce6SDominik Dingel 	default:
2758c0a7ce6SDominik Dingel 		ret = -ENXIO;
2768c0a7ce6SDominik Dingel 		break;
2778c0a7ce6SDominik Dingel 	}
2788c0a7ce6SDominik Dingel 	return ret;
2798c0a7ce6SDominik Dingel }
2808c0a7ce6SDominik Dingel 
2818c0a7ce6SDominik Dingel static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
2824f718eabSDominik Dingel {
2834f718eabSDominik Dingel 	int ret;
2844f718eabSDominik Dingel 	unsigned int idx;
2854f718eabSDominik Dingel 	switch (attr->attr) {
2864f718eabSDominik Dingel 	case KVM_S390_VM_MEM_ENABLE_CMMA:
2874f718eabSDominik Dingel 		ret = -EBUSY;
2884f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
2894f718eabSDominik Dingel 		if (atomic_read(&kvm->online_vcpus) == 0) {
2904f718eabSDominik Dingel 			kvm->arch.use_cmma = 1;
2914f718eabSDominik Dingel 			ret = 0;
2924f718eabSDominik Dingel 		}
2934f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
2944f718eabSDominik Dingel 		break;
2954f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CLR_CMMA:
2964f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
2974f718eabSDominik Dingel 		idx = srcu_read_lock(&kvm->srcu);
298a13cff31SDominik Dingel 		s390_reset_cmma(kvm->arch.gmap->mm);
2994f718eabSDominik Dingel 		srcu_read_unlock(&kvm->srcu, idx);
3004f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
3014f718eabSDominik Dingel 		ret = 0;
3024f718eabSDominik Dingel 		break;
3038c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE: {
3048c0a7ce6SDominik Dingel 		unsigned long new_limit;
3058c0a7ce6SDominik Dingel 
3068c0a7ce6SDominik Dingel 		if (kvm_is_ucontrol(kvm))
3078c0a7ce6SDominik Dingel 			return -EINVAL;
3088c0a7ce6SDominik Dingel 
3098c0a7ce6SDominik Dingel 		if (get_user(new_limit, (u64 __user *)attr->addr))
3108c0a7ce6SDominik Dingel 			return -EFAULT;
3118c0a7ce6SDominik Dingel 
3128c0a7ce6SDominik Dingel 		if (new_limit > kvm->arch.gmap->asce_end)
3138c0a7ce6SDominik Dingel 			return -E2BIG;
3148c0a7ce6SDominik Dingel 
3158c0a7ce6SDominik Dingel 		ret = -EBUSY;
3168c0a7ce6SDominik Dingel 		mutex_lock(&kvm->lock);
3178c0a7ce6SDominik Dingel 		if (atomic_read(&kvm->online_vcpus) == 0) {
3188c0a7ce6SDominik Dingel 			/* gmap_alloc will round the limit up */
3198c0a7ce6SDominik Dingel 			struct gmap *new = gmap_alloc(current->mm, new_limit);
3208c0a7ce6SDominik Dingel 
3218c0a7ce6SDominik Dingel 			if (!new) {
3228c0a7ce6SDominik Dingel 				ret = -ENOMEM;
3238c0a7ce6SDominik Dingel 			} else {
3248c0a7ce6SDominik Dingel 				gmap_free(kvm->arch.gmap);
3258c0a7ce6SDominik Dingel 				new->private = kvm;
3268c0a7ce6SDominik Dingel 				kvm->arch.gmap = new;
3278c0a7ce6SDominik Dingel 				ret = 0;
3288c0a7ce6SDominik Dingel 			}
3298c0a7ce6SDominik Dingel 		}
3308c0a7ce6SDominik Dingel 		mutex_unlock(&kvm->lock);
3318c0a7ce6SDominik Dingel 		break;
3328c0a7ce6SDominik Dingel 	}
3334f718eabSDominik Dingel 	default:
3344f718eabSDominik Dingel 		ret = -ENXIO;
3354f718eabSDominik Dingel 		break;
3364f718eabSDominik Dingel 	}
3374f718eabSDominik Dingel 	return ret;
3384f718eabSDominik Dingel }
3394f718eabSDominik Dingel 
340f2061656SDominik Dingel static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
341f2061656SDominik Dingel {
342f2061656SDominik Dingel 	int ret;
343f2061656SDominik Dingel 
344f2061656SDominik Dingel 	switch (attr->group) {
3454f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
3468c0a7ce6SDominik Dingel 		ret = kvm_s390_set_mem_control(kvm, attr);
3474f718eabSDominik Dingel 		break;
348f2061656SDominik Dingel 	default:
349f2061656SDominik Dingel 		ret = -ENXIO;
350f2061656SDominik Dingel 		break;
351f2061656SDominik Dingel 	}
352f2061656SDominik Dingel 
353f2061656SDominik Dingel 	return ret;
354f2061656SDominik Dingel }
355f2061656SDominik Dingel 
356f2061656SDominik Dingel static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
357f2061656SDominik Dingel {
3588c0a7ce6SDominik Dingel 	int ret;
3598c0a7ce6SDominik Dingel 
3608c0a7ce6SDominik Dingel 	switch (attr->group) {
3618c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
3628c0a7ce6SDominik Dingel 		ret = kvm_s390_get_mem_control(kvm, attr);
3638c0a7ce6SDominik Dingel 		break;
3648c0a7ce6SDominik Dingel 	default:
3658c0a7ce6SDominik Dingel 		ret = -ENXIO;
3668c0a7ce6SDominik Dingel 		break;
3678c0a7ce6SDominik Dingel 	}
3688c0a7ce6SDominik Dingel 
3698c0a7ce6SDominik Dingel 	return ret;
370f2061656SDominik Dingel }
371f2061656SDominik Dingel 
372f2061656SDominik Dingel static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
373f2061656SDominik Dingel {
374f2061656SDominik Dingel 	int ret;
375f2061656SDominik Dingel 
376f2061656SDominik Dingel 	switch (attr->group) {
3774f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
3784f718eabSDominik Dingel 		switch (attr->attr) {
3794f718eabSDominik Dingel 		case KVM_S390_VM_MEM_ENABLE_CMMA:
3804f718eabSDominik Dingel 		case KVM_S390_VM_MEM_CLR_CMMA:
3818c0a7ce6SDominik Dingel 		case KVM_S390_VM_MEM_LIMIT_SIZE:
3824f718eabSDominik Dingel 			ret = 0;
3834f718eabSDominik Dingel 			break;
3844f718eabSDominik Dingel 		default:
3854f718eabSDominik Dingel 			ret = -ENXIO;
3864f718eabSDominik Dingel 			break;
3874f718eabSDominik Dingel 		}
3884f718eabSDominik Dingel 		break;
389f2061656SDominik Dingel 	default:
390f2061656SDominik Dingel 		ret = -ENXIO;
391f2061656SDominik Dingel 		break;
392f2061656SDominik Dingel 	}
393f2061656SDominik Dingel 
394f2061656SDominik Dingel 	return ret;
395f2061656SDominik Dingel }
396f2061656SDominik Dingel 
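/*
 * The three handlers above back the KVM_SET_DEVICE_ATTR, KVM_GET_DEVICE_ATTR
 * and KVM_HAS_DEVICE_ATTR ioctls on a VM fd (dispatched from
 * kvm_arch_vm_ioctl() below).  Illustrative sketch of lowering the guest
 * memory limit, which must happen before any VCPU is created or the handler
 * returns -EBUSY.  The helper name is hypothetical; needs <sys/ioctl.h> and
 * <linux/kvm.h>.
 */
#if 0
static int set_mem_limit_example(int vm_fd, __u64 limit)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_MEM_CTRL,
		.attr  = KVM_S390_VM_MEM_LIMIT_SIZE,
		/* the handler reads the limit from this address with get_user() */
		.addr  = (__u64)(unsigned long)&limit,
	};

	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}
#endif
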
397b0c632dbSHeiko Carstens long kvm_arch_vm_ioctl(struct file *filp,
398b0c632dbSHeiko Carstens 		       unsigned int ioctl, unsigned long arg)
399b0c632dbSHeiko Carstens {
400b0c632dbSHeiko Carstens 	struct kvm *kvm = filp->private_data;
401b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
402f2061656SDominik Dingel 	struct kvm_device_attr attr;
403b0c632dbSHeiko Carstens 	int r;
404b0c632dbSHeiko Carstens 
405b0c632dbSHeiko Carstens 	switch (ioctl) {
406ba5c1e9bSCarsten Otte 	case KVM_S390_INTERRUPT: {
407ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
408ba5c1e9bSCarsten Otte 
409ba5c1e9bSCarsten Otte 		r = -EFAULT;
410ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
411ba5c1e9bSCarsten Otte 			break;
412ba5c1e9bSCarsten Otte 		r = kvm_s390_inject_vm(kvm, &s390int);
413ba5c1e9bSCarsten Otte 		break;
414ba5c1e9bSCarsten Otte 	}
415d938dc55SCornelia Huck 	case KVM_ENABLE_CAP: {
416d938dc55SCornelia Huck 		struct kvm_enable_cap cap;
417d938dc55SCornelia Huck 		r = -EFAULT;
418d938dc55SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
419d938dc55SCornelia Huck 			break;
420d938dc55SCornelia Huck 		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
421d938dc55SCornelia Huck 		break;
422d938dc55SCornelia Huck 	}
42384223598SCornelia Huck 	case KVM_CREATE_IRQCHIP: {
42484223598SCornelia Huck 		struct kvm_irq_routing_entry routing;
42584223598SCornelia Huck 
42684223598SCornelia Huck 		r = -EINVAL;
42784223598SCornelia Huck 		if (kvm->arch.use_irqchip) {
42884223598SCornelia Huck 			/* Set up dummy routing. */
42984223598SCornelia Huck 			memset(&routing, 0, sizeof(routing));
43084223598SCornelia Huck 			kvm_set_irq_routing(kvm, &routing, 0, 0);
43184223598SCornelia Huck 			r = 0;
43284223598SCornelia Huck 		}
43384223598SCornelia Huck 		break;
43484223598SCornelia Huck 	}
435f2061656SDominik Dingel 	case KVM_SET_DEVICE_ATTR: {
436f2061656SDominik Dingel 		r = -EFAULT;
437f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
438f2061656SDominik Dingel 			break;
439f2061656SDominik Dingel 		r = kvm_s390_vm_set_attr(kvm, &attr);
440f2061656SDominik Dingel 		break;
441f2061656SDominik Dingel 	}
442f2061656SDominik Dingel 	case KVM_GET_DEVICE_ATTR: {
443f2061656SDominik Dingel 		r = -EFAULT;
444f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
445f2061656SDominik Dingel 			break;
446f2061656SDominik Dingel 		r = kvm_s390_vm_get_attr(kvm, &attr);
447f2061656SDominik Dingel 		break;
448f2061656SDominik Dingel 	}
449f2061656SDominik Dingel 	case KVM_HAS_DEVICE_ATTR: {
450f2061656SDominik Dingel 		r = -EFAULT;
451f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
452f2061656SDominik Dingel 			break;
453f2061656SDominik Dingel 		r = kvm_s390_vm_has_attr(kvm, &attr);
454f2061656SDominik Dingel 		break;
455f2061656SDominik Dingel 	}
456b0c632dbSHeiko Carstens 	default:
457367e1319SAvi Kivity 		r = -ENOTTY;
458b0c632dbSHeiko Carstens 	}
459b0c632dbSHeiko Carstens 
460b0c632dbSHeiko Carstens 	return r;
461b0c632dbSHeiko Carstens }
462b0c632dbSHeiko Carstens 
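/*
 * Illustrative sketch of injecting a floating interrupt through the
 * KVM_S390_INTERRUPT VM ioctl handled above; the virtio parameter values
 * are placeholders and the helper name is hypothetical.  Needs
 * <sys/ioctl.h> and <linux/kvm.h>.
 */
#if 0
static int inject_virtio_example(int vm_fd)
{
	struct kvm_s390_interrupt s390int = {
		.type   = KVM_S390_INT_VIRTIO,
		.parm   = 0,	/* placeholder parameter */
		.parm64 = 0,	/* placeholder 64-bit parameter */
	};

	return ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int);
}
#endif
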
4635102ee87STony Krowiak static int kvm_s390_crypto_init(struct kvm *kvm)
4645102ee87STony Krowiak {
4655102ee87STony Krowiak 	if (!test_vfacility(76))
4665102ee87STony Krowiak 		return 0;
4675102ee87STony Krowiak 
4685102ee87STony Krowiak 	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
4695102ee87STony Krowiak 					 GFP_KERNEL | GFP_DMA);
4705102ee87STony Krowiak 	if (!kvm->arch.crypto.crycb)
4715102ee87STony Krowiak 		return -ENOMEM;
4725102ee87STony Krowiak 
4735102ee87STony Krowiak 	kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
4745102ee87STony Krowiak 				  CRYCB_FORMAT1;
4755102ee87STony Krowiak 
4765102ee87STony Krowiak 	return 0;
4775102ee87STony Krowiak }
4785102ee87STony Krowiak 
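/*
 * Note on the allocation above: the CRYCB is allocated with GFP_DMA so that
 * it is addressable by the 32-bit crycbd designation, which packs the block
 * origin together with the CRYCB_FORMAT1 bit; each VCPU later copies crycbd
 * into its SIE block in kvm_s390_vcpu_crypto_setup().
 */
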
479e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
480b0c632dbSHeiko Carstens {
481b0c632dbSHeiko Carstens 	int rc;
482b0c632dbSHeiko Carstens 	char debug_name[16];
483f6c137ffSChristian Borntraeger 	static unsigned long sca_offset;
484b0c632dbSHeiko Carstens 
485e08b9637SCarsten Otte 	rc = -EINVAL;
486e08b9637SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
487e08b9637SCarsten Otte 	if (type & ~KVM_VM_S390_UCONTROL)
488e08b9637SCarsten Otte 		goto out_err;
489e08b9637SCarsten Otte 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
490e08b9637SCarsten Otte 		goto out_err;
491e08b9637SCarsten Otte #else
492e08b9637SCarsten Otte 	if (type)
493e08b9637SCarsten Otte 		goto out_err;
494e08b9637SCarsten Otte #endif
495e08b9637SCarsten Otte 
496b0c632dbSHeiko Carstens 	rc = s390_enable_sie();
497b0c632dbSHeiko Carstens 	if (rc)
498d89f5effSJan Kiszka 		goto out_err;
499b0c632dbSHeiko Carstens 
500b290411aSCarsten Otte 	rc = -ENOMEM;
501b290411aSCarsten Otte 
502b0c632dbSHeiko Carstens 	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
503b0c632dbSHeiko Carstens 	if (!kvm->arch.sca)
504d89f5effSJan Kiszka 		goto out_err;
505f6c137ffSChristian Borntraeger 	spin_lock(&kvm_lock);
506f6c137ffSChristian Borntraeger 	sca_offset = (sca_offset + 16) & 0x7f0;
507f6c137ffSChristian Borntraeger 	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
508f6c137ffSChristian Borntraeger 	spin_unlock(&kvm_lock);
509b0c632dbSHeiko Carstens 
510b0c632dbSHeiko Carstens 	sprintf(debug_name, "kvm-%u", current->pid);
511b0c632dbSHeiko Carstens 
512b0c632dbSHeiko Carstens 	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
513b0c632dbSHeiko Carstens 	if (!kvm->arch.dbf)
514b0c632dbSHeiko Carstens 		goto out_nodbf;
515b0c632dbSHeiko Carstens 
5165102ee87STony Krowiak 	if (kvm_s390_crypto_init(kvm) < 0)
5175102ee87STony Krowiak 		goto out_crypto;
5185102ee87STony Krowiak 
519ba5c1e9bSCarsten Otte 	spin_lock_init(&kvm->arch.float_int.lock);
520ba5c1e9bSCarsten Otte 	INIT_LIST_HEAD(&kvm->arch.float_int.list);
5218a242234SHeiko Carstens 	init_waitqueue_head(&kvm->arch.ipte_wq);
522a6b7e459SThomas Huth 	mutex_init(&kvm->arch.ipte_mutex);
523ba5c1e9bSCarsten Otte 
524b0c632dbSHeiko Carstens 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
525b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "%s", "vm created");
526b0c632dbSHeiko Carstens 
527e08b9637SCarsten Otte 	if (type & KVM_VM_S390_UCONTROL) {
528e08b9637SCarsten Otte 		kvm->arch.gmap = NULL;
529e08b9637SCarsten Otte 	} else {
5300349985aSChristian Borntraeger 		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
531598841caSCarsten Otte 		if (!kvm->arch.gmap)
532598841caSCarsten Otte 			goto out_nogmap;
5332c70fe44SChristian Borntraeger 		kvm->arch.gmap->private = kvm;
53424eb3a82SDominik Dingel 		kvm->arch.gmap->pfault_enabled = 0;
535e08b9637SCarsten Otte 	}
536fa6b7fe9SCornelia Huck 
537fa6b7fe9SCornelia Huck 	kvm->arch.css_support = 0;
53884223598SCornelia Huck 	kvm->arch.use_irqchip = 0;
539fa6b7fe9SCornelia Huck 
5408ad35755SDavid Hildenbrand 	spin_lock_init(&kvm->arch.start_stop_lock);
5418ad35755SDavid Hildenbrand 
542d89f5effSJan Kiszka 	return 0;
543598841caSCarsten Otte out_nogmap:
5445102ee87STony Krowiak 	kfree(kvm->arch.crypto.crycb);
5455102ee87STony Krowiak out_crypto:
546598841caSCarsten Otte 	debug_unregister(kvm->arch.dbf);
547b0c632dbSHeiko Carstens out_nodbf:
548b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
549d89f5effSJan Kiszka out_err:
550d89f5effSJan Kiszka 	return rc;
551b0c632dbSHeiko Carstens }
552b0c632dbSHeiko Carstens 
553d329c035SChristian Borntraeger void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
554d329c035SChristian Borntraeger {
555d329c035SChristian Borntraeger 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
556ade38c31SCornelia Huck 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
55767335e63SChristian Borntraeger 	kvm_s390_clear_local_irqs(vcpu);
5583c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
55958f9460bSCarsten Otte 	if (!kvm_is_ucontrol(vcpu->kvm)) {
56058f9460bSCarsten Otte 		clear_bit(63 - vcpu->vcpu_id,
56158f9460bSCarsten Otte 			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
562abf4a71eSCarsten Otte 		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
563abf4a71eSCarsten Otte 		    (__u64) vcpu->arch.sie_block)
564abf4a71eSCarsten Otte 			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
56558f9460bSCarsten Otte 	}
566abf4a71eSCarsten Otte 	smp_mb();
56727e0393fSCarsten Otte 
56827e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm))
56927e0393fSCarsten Otte 		gmap_free(vcpu->arch.gmap);
57027e0393fSCarsten Otte 
571b31605c1SDominik Dingel 	if (kvm_s390_cmma_enabled(vcpu->kvm))
572b31605c1SDominik Dingel 		kvm_s390_vcpu_unsetup_cmma(vcpu);
573d329c035SChristian Borntraeger 	free_page((unsigned long)(vcpu->arch.sie_block));
574b31288faSKonstantin Weitz 
5756692cef3SChristian Borntraeger 	kvm_vcpu_uninit(vcpu);
576b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
577d329c035SChristian Borntraeger }
578d329c035SChristian Borntraeger 
579d329c035SChristian Borntraeger static void kvm_free_vcpus(struct kvm *kvm)
580d329c035SChristian Borntraeger {
581d329c035SChristian Borntraeger 	unsigned int i;
582988a2caeSGleb Natapov 	struct kvm_vcpu *vcpu;
583d329c035SChristian Borntraeger 
584988a2caeSGleb Natapov 	kvm_for_each_vcpu(i, vcpu, kvm)
585988a2caeSGleb Natapov 		kvm_arch_vcpu_destroy(vcpu);
586988a2caeSGleb Natapov 
587988a2caeSGleb Natapov 	mutex_lock(&kvm->lock);
588988a2caeSGleb Natapov 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
589d329c035SChristian Borntraeger 		kvm->vcpus[i] = NULL;
590988a2caeSGleb Natapov 
591988a2caeSGleb Natapov 	atomic_set(&kvm->online_vcpus, 0);
592988a2caeSGleb Natapov 	mutex_unlock(&kvm->lock);
593d329c035SChristian Borntraeger }
594d329c035SChristian Borntraeger 
595b0c632dbSHeiko Carstens void kvm_arch_destroy_vm(struct kvm *kvm)
596b0c632dbSHeiko Carstens {
597d329c035SChristian Borntraeger 	kvm_free_vcpus(kvm);
598b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
599d329c035SChristian Borntraeger 	debug_unregister(kvm->arch.dbf);
6005102ee87STony Krowiak 	kfree(kvm->arch.crypto.crycb);
60127e0393fSCarsten Otte 	if (!kvm_is_ucontrol(kvm))
602598841caSCarsten Otte 		gmap_free(kvm->arch.gmap);
603841b91c5SCornelia Huck 	kvm_s390_destroy_adapters(kvm);
60467335e63SChristian Borntraeger 	kvm_s390_clear_float_irqs(kvm);
605b0c632dbSHeiko Carstens }
606b0c632dbSHeiko Carstens 
607b0c632dbSHeiko Carstens /* Section: vcpu related */
608dafd032aSDominik Dingel static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
609b0c632dbSHeiko Carstens {
610c6c956b8SMartin Schwidefsky 	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
61127e0393fSCarsten Otte 	if (!vcpu->arch.gmap)
61227e0393fSCarsten Otte 		return -ENOMEM;
6132c70fe44SChristian Borntraeger 	vcpu->arch.gmap->private = vcpu->kvm;
614dafd032aSDominik Dingel 
61527e0393fSCarsten Otte 	return 0;
61627e0393fSCarsten Otte }
61727e0393fSCarsten Otte 
618dafd032aSDominik Dingel int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
619dafd032aSDominik Dingel {
620dafd032aSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
621dafd032aSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
62259674c1aSChristian Borntraeger 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
62359674c1aSChristian Borntraeger 				    KVM_SYNC_GPRS |
6249eed0735SChristian Borntraeger 				    KVM_SYNC_ACRS |
625b028ee3eSDavid Hildenbrand 				    KVM_SYNC_CRS |
626b028ee3eSDavid Hildenbrand 				    KVM_SYNC_ARCH0 |
627b028ee3eSDavid Hildenbrand 				    KVM_SYNC_PFAULT;
628dafd032aSDominik Dingel 
629dafd032aSDominik Dingel 	if (kvm_is_ucontrol(vcpu->kvm))
630dafd032aSDominik Dingel 		return __kvm_ucontrol_vcpu_init(vcpu);
631dafd032aSDominik Dingel 
632b0c632dbSHeiko Carstens 	return 0;
633b0c632dbSHeiko Carstens }
634b0c632dbSHeiko Carstens 
635b0c632dbSHeiko Carstens void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
636b0c632dbSHeiko Carstens {
6374725c860SMartin Schwidefsky 	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
6384725c860SMartin Schwidefsky 	save_fp_regs(vcpu->arch.host_fpregs.fprs);
639b0c632dbSHeiko Carstens 	save_access_regs(vcpu->arch.host_acrs);
6404725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
6414725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
64259674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
643480e5926SChristian Borntraeger 	gmap_enable(vcpu->arch.gmap);
6449e6dabefSCornelia Huck 	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
645b0c632dbSHeiko Carstens }
646b0c632dbSHeiko Carstens 
647b0c632dbSHeiko Carstens void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
648b0c632dbSHeiko Carstens {
6499e6dabefSCornelia Huck 	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
650480e5926SChristian Borntraeger 	gmap_disable(vcpu->arch.gmap);
6514725c860SMartin Schwidefsky 	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
6524725c860SMartin Schwidefsky 	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
65359674c1aSChristian Borntraeger 	save_access_regs(vcpu->run->s.regs.acrs);
6544725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
6554725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
656b0c632dbSHeiko Carstens 	restore_access_regs(vcpu->arch.host_acrs);
657b0c632dbSHeiko Carstens }
658b0c632dbSHeiko Carstens 
659b0c632dbSHeiko Carstens static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
660b0c632dbSHeiko Carstens {
661b0c632dbSHeiko Carstens 	/* this equals initial cpu reset in pop, but we don't switch to ESA */
662b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.mask = 0UL;
663b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.addr = 0UL;
6648d26cf7bSChristian Borntraeger 	kvm_s390_set_prefix(vcpu, 0);
665b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->cputm     = 0UL;
666b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->ckc       = 0UL;
667b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->todpr     = 0;
668b0c632dbSHeiko Carstens 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
669b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
670b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
671b0c632dbSHeiko Carstens 	vcpu->arch.guest_fpregs.fpc = 0;
672b0c632dbSHeiko Carstens 	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
673b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gbea = 1;
674672550fbSChristian Borntraeger 	vcpu->arch.sie_block->pp = 0;
6753c038e6bSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
6763c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
6776352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
6786852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
6792ed10cc1SJens Freimann 	kvm_s390_clear_local_irqs(vcpu);
680b0c632dbSHeiko Carstens }
681b0c632dbSHeiko Carstens 
68231928aa5SDominik Dingel void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
68342897d86SMarcelo Tosatti {
684dafd032aSDominik Dingel 	if (!kvm_is_ucontrol(vcpu->kvm))
685dafd032aSDominik Dingel 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
68642897d86SMarcelo Tosatti }
68742897d86SMarcelo Tosatti 
6885102ee87STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
6895102ee87STony Krowiak {
6905102ee87STony Krowiak 	if (!test_vfacility(76))
6915102ee87STony Krowiak 		return;
6925102ee87STony Krowiak 
6935102ee87STony Krowiak 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
6945102ee87STony Krowiak }
6955102ee87STony Krowiak 
696b31605c1SDominik Dingel void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
697b31605c1SDominik Dingel {
698b31605c1SDominik Dingel 	free_page(vcpu->arch.sie_block->cbrlo);
699b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = 0;
700b31605c1SDominik Dingel }
701b31605c1SDominik Dingel 
702b31605c1SDominik Dingel int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
703b31605c1SDominik Dingel {
704b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
705b31605c1SDominik Dingel 	if (!vcpu->arch.sie_block->cbrlo)
706b31605c1SDominik Dingel 		return -ENOMEM;
707b31605c1SDominik Dingel 
708b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 |= 0x80;
709b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 &= ~0x08;
710b31605c1SDominik Dingel 	return 0;
711b31605c1SDominik Dingel }
712b31605c1SDominik Dingel 
713b0c632dbSHeiko Carstens int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
714b0c632dbSHeiko Carstens {
715b31605c1SDominik Dingel 	int rc = 0;
716b31288faSKonstantin Weitz 
7179e6dabefSCornelia Huck 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
7189e6dabefSCornelia Huck 						    CPUSTAT_SM |
71969d0d3a3SChristian Borntraeger 						    CPUSTAT_STOPPED |
72069d0d3a3SChristian Borntraeger 						    CPUSTAT_GED);
721fc34531dSChristian Borntraeger 	vcpu->arch.sie_block->ecb   = 6;
7227feb6bb8SMichael Mueller 	if (test_vfacility(50) && test_vfacility(73))
7237feb6bb8SMichael Mueller 		vcpu->arch.sie_block->ecb |= 0x10;
7247feb6bb8SMichael Mueller 
72569d0d3a3SChristian Borntraeger 	vcpu->arch.sie_block->ecb2  = 8;
726ea5f4969SDavid Hildenbrand 	vcpu->arch.sie_block->eca   = 0xC1002000U;
727217a4406SHeiko Carstens 	if (sclp_has_siif())
728217a4406SHeiko Carstens 		vcpu->arch.sie_block->eca |= 1;
729ea5f4969SDavid Hildenbrand 	if (sclp_has_sigpif())
730ea5f4969SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= 0x10000000U;
73178c4b59fSMichael Mueller 	vcpu->arch.sie_block->fac   = (int) (long) vfacilities;
7325a5e6536SMatthew Rosato 	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
7335a5e6536SMatthew Rosato 				      ICTL_TPROT;
7345a5e6536SMatthew Rosato 
735b31605c1SDominik Dingel 	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
736b31605c1SDominik Dingel 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
737b31605c1SDominik Dingel 		if (rc)
738b31605c1SDominik Dingel 			return rc;
739b31288faSKonstantin Weitz 	}
7400ac96cafSDavid Hildenbrand 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
741ca872302SChristian Borntraeger 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
742453423dcSChristian Borntraeger 	get_cpu_id(&vcpu->arch.cpu_id);
74392e6ecf3SChristian Borntraeger 	vcpu->arch.cpu_id.version = 0xff;
7445102ee87STony Krowiak 
7455102ee87STony Krowiak 	kvm_s390_vcpu_crypto_setup(vcpu);
7465102ee87STony Krowiak 
747b31605c1SDominik Dingel 	return rc;
748b0c632dbSHeiko Carstens }
749b0c632dbSHeiko Carstens 
750b0c632dbSHeiko Carstens struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
751b0c632dbSHeiko Carstens 				      unsigned int id)
752b0c632dbSHeiko Carstens {
7534d47555aSCarsten Otte 	struct kvm_vcpu *vcpu;
7547feb6bb8SMichael Mueller 	struct sie_page *sie_page;
7554d47555aSCarsten Otte 	int rc = -EINVAL;
756b0c632dbSHeiko Carstens 
7574d47555aSCarsten Otte 	if (id >= KVM_MAX_VCPUS)
7584d47555aSCarsten Otte 		goto out;
7594d47555aSCarsten Otte 
7604d47555aSCarsten Otte 	rc = -ENOMEM;
7614d47555aSCarsten Otte 
762b110feafSMichael Mueller 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
763b0c632dbSHeiko Carstens 	if (!vcpu)
7644d47555aSCarsten Otte 		goto out;
765b0c632dbSHeiko Carstens 
7667feb6bb8SMichael Mueller 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
7677feb6bb8SMichael Mueller 	if (!sie_page)
768b0c632dbSHeiko Carstens 		goto out_free_cpu;
769b0c632dbSHeiko Carstens 
7707feb6bb8SMichael Mueller 	vcpu->arch.sie_block = &sie_page->sie_block;
7717feb6bb8SMichael Mueller 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
7727feb6bb8SMichael Mueller 
773b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icpua = id;
77458f9460bSCarsten Otte 	if (!kvm_is_ucontrol(kvm)) {
77558f9460bSCarsten Otte 		if (!kvm->arch.sca) {
77658f9460bSCarsten Otte 			WARN_ON_ONCE(1);
77758f9460bSCarsten Otte 			goto out_free_cpu;
77858f9460bSCarsten Otte 		}
779abf4a71eSCarsten Otte 		if (!kvm->arch.sca->cpu[id].sda)
78058f9460bSCarsten Otte 			kvm->arch.sca->cpu[id].sda =
78158f9460bSCarsten Otte 				(__u64) vcpu->arch.sie_block;
78258f9460bSCarsten Otte 		vcpu->arch.sie_block->scaoh =
78358f9460bSCarsten Otte 			(__u32)(((__u64)kvm->arch.sca) >> 32);
784b0c632dbSHeiko Carstens 		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
785fc34531dSChristian Borntraeger 		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
78658f9460bSCarsten Otte 	}
787b0c632dbSHeiko Carstens 
788ba5c1e9bSCarsten Otte 	spin_lock_init(&vcpu->arch.local_int.lock);
789ba5c1e9bSCarsten Otte 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
790d0321a24SChristian Borntraeger 	vcpu->arch.local_int.wq = &vcpu->wq;
7915288fbf0SChristian Borntraeger 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
792ba5c1e9bSCarsten Otte 
793b0c632dbSHeiko Carstens 	rc = kvm_vcpu_init(vcpu, kvm, id);
794b0c632dbSHeiko Carstens 	if (rc)
7957b06bf2fSWei Yongjun 		goto out_free_sie_block;
796b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
797b0c632dbSHeiko Carstens 		 vcpu->arch.sie_block);
798ade38c31SCornelia Huck 	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
799b0c632dbSHeiko Carstens 
800b0c632dbSHeiko Carstens 	return vcpu;
8017b06bf2fSWei Yongjun out_free_sie_block:
8027b06bf2fSWei Yongjun 	free_page((unsigned long)(vcpu->arch.sie_block));
803b0c632dbSHeiko Carstens out_free_cpu:
804b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
8054d47555aSCarsten Otte out:
806b0c632dbSHeiko Carstens 	return ERR_PTR(rc);
807b0c632dbSHeiko Carstens }
808b0c632dbSHeiko Carstens 
809b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
810b0c632dbSHeiko Carstens {
8119a022067SDavid Hildenbrand 	return kvm_s390_vcpu_has_irq(vcpu, 0);
812b0c632dbSHeiko Carstens }
813b0c632dbSHeiko Carstens 
81449b99e1eSChristian Borntraeger void s390_vcpu_block(struct kvm_vcpu *vcpu)
81549b99e1eSChristian Borntraeger {
81649b99e1eSChristian Borntraeger 	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
81749b99e1eSChristian Borntraeger }
81849b99e1eSChristian Borntraeger 
81949b99e1eSChristian Borntraeger void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
82049b99e1eSChristian Borntraeger {
82149b99e1eSChristian Borntraeger 	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
82249b99e1eSChristian Borntraeger }
82349b99e1eSChristian Borntraeger 
82449b99e1eSChristian Borntraeger /*
82549b99e1eSChristian Borntraeger  * Kick a guest cpu out of SIE and wait until SIE is not running.
82649b99e1eSChristian Borntraeger  * If the CPU is not running (e.g. waiting as idle) the function will
82749b99e1eSChristian Borntraeger  * return immediately. */
82849b99e1eSChristian Borntraeger void exit_sie(struct kvm_vcpu *vcpu)
82949b99e1eSChristian Borntraeger {
83049b99e1eSChristian Borntraeger 	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
83149b99e1eSChristian Borntraeger 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
83249b99e1eSChristian Borntraeger 		cpu_relax();
83349b99e1eSChristian Borntraeger }
83449b99e1eSChristian Borntraeger 
83549b99e1eSChristian Borntraeger /* Kick a guest cpu out of SIE and prevent SIE-reentry */
83649b99e1eSChristian Borntraeger void exit_sie_sync(struct kvm_vcpu *vcpu)
83749b99e1eSChristian Borntraeger {
83849b99e1eSChristian Borntraeger 	s390_vcpu_block(vcpu);
83949b99e1eSChristian Borntraeger 	exit_sie(vcpu);
84049b99e1eSChristian Borntraeger }
84149b99e1eSChristian Borntraeger 
8422c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
8432c70fe44SChristian Borntraeger {
8442c70fe44SChristian Borntraeger 	int i;
8452c70fe44SChristian Borntraeger 	struct kvm *kvm = gmap->private;
8462c70fe44SChristian Borntraeger 	struct kvm_vcpu *vcpu;
8472c70fe44SChristian Borntraeger 
8482c70fe44SChristian Borntraeger 	kvm_for_each_vcpu(i, vcpu, kvm) {
8492c70fe44SChristian Borntraeger 		/* match against both prefix pages */
850fda902cbSMichael Mueller 		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
8512c70fe44SChristian Borntraeger 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
8522c70fe44SChristian Borntraeger 			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
8532c70fe44SChristian Borntraeger 			exit_sie_sync(vcpu);
8542c70fe44SChristian Borntraeger 		}
8552c70fe44SChristian Borntraeger 	}
8562c70fe44SChristian Borntraeger }
8572c70fe44SChristian Borntraeger 
858b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
859b6d33834SChristoffer Dall {
860b6d33834SChristoffer Dall 	/* kvm common code refers to this, but never calls it */
861b6d33834SChristoffer Dall 	BUG();
862b6d33834SChristoffer Dall 	return 0;
863b6d33834SChristoffer Dall }
864b6d33834SChristoffer Dall 
86514eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
86614eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
86714eebd91SCarsten Otte {
86814eebd91SCarsten Otte 	int r = -EINVAL;
86914eebd91SCarsten Otte 
87014eebd91SCarsten Otte 	switch (reg->id) {
87129b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
87229b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->todpr,
87329b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
87429b7c71bSCarsten Otte 		break;
87529b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
87629b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->epoch,
87729b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
87829b7c71bSCarsten Otte 		break;
87946a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
88046a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->cputm,
88146a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
88246a6dd1cSJason J. herne 		break;
88346a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
88446a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->ckc,
88546a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
88646a6dd1cSJason J. herne 		break;
887536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
888536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_token,
889536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
890536336c2SDominik Dingel 		break;
891536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
892536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_compare,
893536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
894536336c2SDominik Dingel 		break;
895536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
896536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_select,
897536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
898536336c2SDominik Dingel 		break;
899672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
900672550fbSChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->pp,
901672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
902672550fbSChristian Borntraeger 		break;
903afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
904afa45ff5SChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->gbea,
905afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
906afa45ff5SChristian Borntraeger 		break;
90714eebd91SCarsten Otte 	default:
90814eebd91SCarsten Otte 		break;
90914eebd91SCarsten Otte 	}
91014eebd91SCarsten Otte 
91114eebd91SCarsten Otte 	return r;
91214eebd91SCarsten Otte }
91314eebd91SCarsten Otte 
91414eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
91514eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
91614eebd91SCarsten Otte {
91714eebd91SCarsten Otte 	int r = -EINVAL;
91814eebd91SCarsten Otte 
91914eebd91SCarsten Otte 	switch (reg->id) {
92029b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
92129b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->todpr,
92229b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
92329b7c71bSCarsten Otte 		break;
92429b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
92529b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->epoch,
92629b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
92729b7c71bSCarsten Otte 		break;
92846a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
92946a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->cputm,
93046a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
93146a6dd1cSJason J. herne 		break;
93246a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
93346a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->ckc,
93446a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
93546a6dd1cSJason J. herne 		break;
936536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
937536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_token,
938536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
939*9fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
940*9fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
941536336c2SDominik Dingel 		break;
942536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
943536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_compare,
944536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
945536336c2SDominik Dingel 		break;
946536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
947536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_select,
948536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
949536336c2SDominik Dingel 		break;
950672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
951672550fbSChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->pp,
952672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
953672550fbSChristian Borntraeger 		break;
954afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
955afa45ff5SChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->gbea,
956afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
957afa45ff5SChristian Borntraeger 		break;
95814eebd91SCarsten Otte 	default:
95914eebd91SCarsten Otte 		break;
96014eebd91SCarsten Otte 	}
96114eebd91SCarsten Otte 
96214eebd91SCarsten Otte 	return r;
96314eebd91SCarsten Otte }
964b6d33834SChristoffer Dall 
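/*
 * The two handlers above implement the KVM_GET_ONE_REG / KVM_SET_ONE_REG
 * VCPU ioctls.  Illustrative sketch of reading the guest CPU timer; the
 * helper name is hypothetical.  Needs <sys/ioctl.h> and <linux/kvm.h>.
 */
#if 0
static int get_cpu_timer_example(int vcpu_fd, __u64 *val)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_S390_CPU_TIMER,
		.addr = (__u64)(unsigned long)val,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
#endif
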
965b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
966b0c632dbSHeiko Carstens {
967b0c632dbSHeiko Carstens 	kvm_s390_vcpu_initial_reset(vcpu);
968b0c632dbSHeiko Carstens 	return 0;
969b0c632dbSHeiko Carstens }
970b0c632dbSHeiko Carstens 
971b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
972b0c632dbSHeiko Carstens {
9735a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
974b0c632dbSHeiko Carstens 	return 0;
975b0c632dbSHeiko Carstens }
976b0c632dbSHeiko Carstens 
977b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
978b0c632dbSHeiko Carstens {
9795a32c1afSChristian Borntraeger 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
980b0c632dbSHeiko Carstens 	return 0;
981b0c632dbSHeiko Carstens }
982b0c632dbSHeiko Carstens 
983b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
984b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
985b0c632dbSHeiko Carstens {
98659674c1aSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
987b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
98859674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
989b0c632dbSHeiko Carstens 	return 0;
990b0c632dbSHeiko Carstens }
991b0c632dbSHeiko Carstens 
992b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
993b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
994b0c632dbSHeiko Carstens {
99559674c1aSChristian Borntraeger 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
996b0c632dbSHeiko Carstens 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
997b0c632dbSHeiko Carstens 	return 0;
998b0c632dbSHeiko Carstens }
999b0c632dbSHeiko Carstens 
1000b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1001b0c632dbSHeiko Carstens {
10024725c860SMartin Schwidefsky 	if (test_fp_ctl(fpu->fpc))
10034725c860SMartin Schwidefsky 		return -EINVAL;
1004b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
10054725c860SMartin Schwidefsky 	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
10064725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
10074725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
1008b0c632dbSHeiko Carstens 	return 0;
1009b0c632dbSHeiko Carstens }
1010b0c632dbSHeiko Carstens 
1011b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1012b0c632dbSHeiko Carstens {
1013b0c632dbSHeiko Carstens 	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
1014b0c632dbSHeiko Carstens 	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
1015b0c632dbSHeiko Carstens 	return 0;
1016b0c632dbSHeiko Carstens }
1017b0c632dbSHeiko Carstens 
1018b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1019b0c632dbSHeiko Carstens {
1020b0c632dbSHeiko Carstens 	int rc = 0;
1021b0c632dbSHeiko Carstens 
10227a42fdc2SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
1023b0c632dbSHeiko Carstens 		rc = -EBUSY;
1024d7b0b5ebSCarsten Otte 	else {
1025d7b0b5ebSCarsten Otte 		vcpu->run->psw_mask = psw.mask;
1026d7b0b5ebSCarsten Otte 		vcpu->run->psw_addr = psw.addr;
1027d7b0b5ebSCarsten Otte 	}
1028b0c632dbSHeiko Carstens 	return rc;
1029b0c632dbSHeiko Carstens }
1030b0c632dbSHeiko Carstens 
1031b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1032b0c632dbSHeiko Carstens 				  struct kvm_translation *tr)
1033b0c632dbSHeiko Carstens {
1034b0c632dbSHeiko Carstens 	return -EINVAL; /* not implemented yet */
1035b0c632dbSHeiko Carstens }
1036b0c632dbSHeiko Carstens 
103727291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
103827291e21SDavid Hildenbrand 			      KVM_GUESTDBG_USE_HW_BP | \
103927291e21SDavid Hildenbrand 			      KVM_GUESTDBG_ENABLE)
104027291e21SDavid Hildenbrand 
1041d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1042d0bfb940SJan Kiszka 					struct kvm_guest_debug *dbg)
1043b0c632dbSHeiko Carstens {
104427291e21SDavid Hildenbrand 	int rc = 0;
104527291e21SDavid Hildenbrand 
104627291e21SDavid Hildenbrand 	vcpu->guest_debug = 0;
104727291e21SDavid Hildenbrand 	kvm_s390_clear_bp_data(vcpu);
104827291e21SDavid Hildenbrand 
10492de3bfc2SDavid Hildenbrand 	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
105027291e21SDavid Hildenbrand 		return -EINVAL;
105127291e21SDavid Hildenbrand 
105227291e21SDavid Hildenbrand 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
105327291e21SDavid Hildenbrand 		vcpu->guest_debug = dbg->control;
105427291e21SDavid Hildenbrand 		/* enforce guest PER */
105527291e21SDavid Hildenbrand 		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
105627291e21SDavid Hildenbrand 
105727291e21SDavid Hildenbrand 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
105827291e21SDavid Hildenbrand 			rc = kvm_s390_import_bp_data(vcpu, dbg);
105927291e21SDavid Hildenbrand 	} else {
106027291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
106127291e21SDavid Hildenbrand 		vcpu->arch.guestdbg.last_bp = 0;
106227291e21SDavid Hildenbrand 	}
106327291e21SDavid Hildenbrand 
106427291e21SDavid Hildenbrand 	if (rc) {
106527291e21SDavid Hildenbrand 		vcpu->guest_debug = 0;
106627291e21SDavid Hildenbrand 		kvm_s390_clear_bp_data(vcpu);
106727291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
106827291e21SDavid Hildenbrand 	}
106927291e21SDavid Hildenbrand 
107027291e21SDavid Hildenbrand 	return rc;
1071b0c632dbSHeiko Carstens }
1072b0c632dbSHeiko Carstens 
107362d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
107462d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
107562d9f0dbSMarcelo Tosatti {
10766352e4d2SDavid Hildenbrand 	/* CHECK_STOP and LOAD are not supported yet */
10776352e4d2SDavid Hildenbrand 	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
10786352e4d2SDavid Hildenbrand 				       KVM_MP_STATE_OPERATING;
107962d9f0dbSMarcelo Tosatti }
108062d9f0dbSMarcelo Tosatti 
108162d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
108262d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
108362d9f0dbSMarcelo Tosatti {
10846352e4d2SDavid Hildenbrand 	int rc = 0;
10856352e4d2SDavid Hildenbrand 
10866352e4d2SDavid Hildenbrand 	/* user space knows about this interface - let it control the state */
10876352e4d2SDavid Hildenbrand 	vcpu->kvm->arch.user_cpu_state_ctrl = 1;
10886352e4d2SDavid Hildenbrand 
10896352e4d2SDavid Hildenbrand 	switch (mp_state->mp_state) {
10906352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_STOPPED:
10916352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
10926352e4d2SDavid Hildenbrand 		break;
10936352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_OPERATING:
10946352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
10956352e4d2SDavid Hildenbrand 		break;
10966352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_LOAD:
10976352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_CHECK_STOP:
10986352e4d2SDavid Hildenbrand 		/* fall through - CHECK_STOP and LOAD are not supported yet */
10996352e4d2SDavid Hildenbrand 	default:
11006352e4d2SDavid Hildenbrand 		rc = -ENXIO;
11016352e4d2SDavid Hildenbrand 	}
11026352e4d2SDavid Hildenbrand 
11036352e4d2SDavid Hildenbrand 	return rc;
110462d9f0dbSMarcelo Tosatti }
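
/*
 * Illustrative userspace sketch (added, not part of this file): stopping a
 * VCPU via the MP state interface, assuming a vcpu fd and <linux/kvm.h>:
 *
 *	struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_STOPPED };
 *	if (ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp) < 0)
 *		perror("KVM_SET_MP_STATE");
 */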
110562d9f0dbSMarcelo Tosatti 
1106b31605c1SDominik Dingel bool kvm_s390_cmma_enabled(struct kvm *kvm)
1107b31605c1SDominik Dingel {
1108b31605c1SDominik Dingel 	if (!MACHINE_IS_LPAR)
1109b31605c1SDominik Dingel 		return false;
1110b31605c1SDominik Dingel 	/* only enable for z10 and later */
1111b31605c1SDominik Dingel 	if (!MACHINE_HAS_EDAT1)
1112b31605c1SDominik Dingel 		return false;
1113b31605c1SDominik Dingel 	if (!kvm->arch.use_cmma)
1114b31605c1SDominik Dingel 		return false;
1115b31605c1SDominik Dingel 	return true;
1116b31605c1SDominik Dingel }
1117b31605c1SDominik Dingel 
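/*
 * Descriptive note (added): IBS is only kept enabled while a single VCPU is
 * running (see kvm_s390_vcpu_start/stop below); this helper simply checks
 * the CPUSTAT_IBS flag in the SIE control block.
 */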
11188ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu)
11198ad35755SDavid Hildenbrand {
11208ad35755SDavid Hildenbrand 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
11218ad35755SDavid Hildenbrand }
11228ad35755SDavid Hildenbrand 
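/*
 * Descriptive note (added): process requests raised for this VCPU before
 * (re)entering SIE. Any request that rearms state or toggles SIE flags
 * restarts the loop so that requests raised in the meantime are also seen.
 */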
11232c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
11242c70fe44SChristian Borntraeger {
11258ad35755SDavid Hildenbrand retry:
11268ad35755SDavid Hildenbrand 	s390_vcpu_unblock(vcpu);
11272c70fe44SChristian Borntraeger 	/*
11282c70fe44SChristian Borntraeger 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
11292c70fe44SChristian Borntraeger 	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
11302c70fe44SChristian Borntraeger 	 * This ensures that the ipte instruction for this request has
11312c70fe44SChristian Borntraeger 	 * already finished. We might race against a second unmapper that
11322c70fe44SChristian Borntraeger 	 * wants to set the blocking bit. Lets just retry the request loop.
11332c70fe44SChristian Borntraeger 	 * wants to set the blocking bit. Let's just retry the request loop.
11348ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
11352c70fe44SChristian Borntraeger 		int rc;
11362c70fe44SChristian Borntraeger 		rc = gmap_ipte_notify(vcpu->arch.gmap,
1137fda902cbSMichael Mueller 				      kvm_s390_get_prefix(vcpu),
11382c70fe44SChristian Borntraeger 				      PAGE_SIZE * 2);
11392c70fe44SChristian Borntraeger 		if (rc)
11402c70fe44SChristian Borntraeger 			return rc;
11418ad35755SDavid Hildenbrand 		goto retry;
11422c70fe44SChristian Borntraeger 	}
11438ad35755SDavid Hildenbrand 
1144d3d692c8SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
1145d3d692c8SDavid Hildenbrand 		vcpu->arch.sie_block->ihcpu = 0xffff;
1146d3d692c8SDavid Hildenbrand 		goto retry;
1147d3d692c8SDavid Hildenbrand 	}
1148d3d692c8SDavid Hildenbrand 
11498ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
11508ad35755SDavid Hildenbrand 		if (!ibs_enabled(vcpu)) {
11518ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
11528ad35755SDavid Hildenbrand 			atomic_set_mask(CPUSTAT_IBS,
11538ad35755SDavid Hildenbrand 					&vcpu->arch.sie_block->cpuflags);
11548ad35755SDavid Hildenbrand 		}
11558ad35755SDavid Hildenbrand 		goto retry;
11568ad35755SDavid Hildenbrand 	}
11578ad35755SDavid Hildenbrand 
11588ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
11598ad35755SDavid Hildenbrand 		if (ibs_enabled(vcpu)) {
11608ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
11618ad35755SDavid Hildenbrand 			atomic_clear_mask(CPUSTAT_IBS,
11628ad35755SDavid Hildenbrand 					  &vcpu->arch.sie_block->cpuflags);
11638ad35755SDavid Hildenbrand 		}
11648ad35755SDavid Hildenbrand 		goto retry;
11658ad35755SDavid Hildenbrand 	}
11668ad35755SDavid Hildenbrand 
11670759d068SDavid Hildenbrand 	/* nothing to do, just clear the request */
11680759d068SDavid Hildenbrand 	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
11690759d068SDavid Hildenbrand 
11702c70fe44SChristian Borntraeger 	return 0;
11712c70fe44SChristian Borntraeger }
11722c70fe44SChristian Borntraeger 
1173fa576c58SThomas Huth /**
1174fa576c58SThomas Huth  * kvm_arch_fault_in_page - fault-in guest page if necessary
1175fa576c58SThomas Huth  * @vcpu: The corresponding virtual cpu
1176fa576c58SThomas Huth  * @gpa: Guest physical address
1177fa576c58SThomas Huth  * @writable: Whether the page should be writable or not
1178fa576c58SThomas Huth  *
1179fa576c58SThomas Huth  * Make sure that a guest page has been faulted-in on the host.
1180fa576c58SThomas Huth  *
1181fa576c58SThomas Huth  * Return: Zero on success, negative error code otherwise.
1182fa576c58SThomas Huth  */
1183fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
118424eb3a82SDominik Dingel {
1185527e30b4SMartin Schwidefsky 	return gmap_fault(vcpu->arch.gmap, gpa,
1186527e30b4SMartin Schwidefsky 			  writable ? FAULT_FLAG_WRITE : 0);
118724eb3a82SDominik Dingel }
118824eb3a82SDominik Dingel 
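/*
 * Descriptive note (added): inject the pfault handshake interrupts. The INIT
 * token is delivered to the faulting VCPU, while the DONE notification is
 * injected as a floating interrupt into the VM.
 */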
11893c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
11903c038e6bSDominik Dingel 				      unsigned long token)
11913c038e6bSDominik Dingel {
11923c038e6bSDominik Dingel 	struct kvm_s390_interrupt inti;
1193383d0b05SJens Freimann 	struct kvm_s390_irq irq;
11943c038e6bSDominik Dingel 
11953c038e6bSDominik Dingel 	if (start_token) {
1196383d0b05SJens Freimann 		irq.u.ext.ext_params2 = token;
1197383d0b05SJens Freimann 		irq.type = KVM_S390_INT_PFAULT_INIT;
1198383d0b05SJens Freimann 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
11993c038e6bSDominik Dingel 	} else {
12003c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_DONE;
1201383d0b05SJens Freimann 		inti.parm64 = token;
12023c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
12033c038e6bSDominik Dingel 	}
12043c038e6bSDominik Dingel }
12053c038e6bSDominik Dingel 
12063c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
12073c038e6bSDominik Dingel 				     struct kvm_async_pf *work)
12083c038e6bSDominik Dingel {
12093c038e6bSDominik Dingel 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
12103c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
12113c038e6bSDominik Dingel }
12123c038e6bSDominik Dingel 
12133c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
12143c038e6bSDominik Dingel 				 struct kvm_async_pf *work)
12153c038e6bSDominik Dingel {
12163c038e6bSDominik Dingel 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
12173c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
12183c038e6bSDominik Dingel }
12193c038e6bSDominik Dingel 
12203c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
12213c038e6bSDominik Dingel 			       struct kvm_async_pf *work)
12223c038e6bSDominik Dingel {
12233c038e6bSDominik Dingel 	/* s390 will always inject the page directly */
12243c038e6bSDominik Dingel }
12253c038e6bSDominik Dingel 
12263c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
12273c038e6bSDominik Dingel {
12283c038e6bSDominik Dingel 	/*
12293c038e6bSDominik Dingel 	 * s390 will always inject the page directly,
12303c038e6bSDominik Dingel 	 * but we still want kvm_check_async_pf_completion() to clean up
12313c038e6bSDominik Dingel 	 */
12323c038e6bSDominik Dingel 	return true;
12333c038e6bSDominik Dingel }
12343c038e6bSDominik Dingel 
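/*
 * Descriptive note (added): only set up an async page fault if the guest has
 * enabled the pfault handshake and can currently receive the INIT interrupt;
 * otherwise the caller falls back to a synchronous fault-in.
 */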
12353c038e6bSDominik Dingel static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
12363c038e6bSDominik Dingel {
12373c038e6bSDominik Dingel 	hva_t hva;
12383c038e6bSDominik Dingel 	struct kvm_arch_async_pf arch;
12393c038e6bSDominik Dingel 	int rc;
12403c038e6bSDominik Dingel 
12413c038e6bSDominik Dingel 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
12423c038e6bSDominik Dingel 		return 0;
12433c038e6bSDominik Dingel 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
12443c038e6bSDominik Dingel 	    vcpu->arch.pfault_compare)
12453c038e6bSDominik Dingel 		return 0;
12463c038e6bSDominik Dingel 	if (psw_extint_disabled(vcpu))
12473c038e6bSDominik Dingel 		return 0;
12489a022067SDavid Hildenbrand 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
12493c038e6bSDominik Dingel 		return 0;
12503c038e6bSDominik Dingel 	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
12513c038e6bSDominik Dingel 		return 0;
12523c038e6bSDominik Dingel 	if (!vcpu->arch.gmap->pfault_enabled)
12533c038e6bSDominik Dingel 		return 0;
12543c038e6bSDominik Dingel 
125581480cc1SHeiko Carstens 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
125681480cc1SHeiko Carstens 	hva += current->thread.gmap_addr & ~PAGE_MASK;
125781480cc1SHeiko Carstens 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
12583c038e6bSDominik Dingel 		return 0;
12593c038e6bSDominik Dingel 
12603c038e6bSDominik Dingel 	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
12613c038e6bSDominik Dingel 	return rc;
12623c038e6bSDominik Dingel }
12633c038e6bSDominik Dingel 
12643fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu)
1265b0c632dbSHeiko Carstens {
12663fb4c40fSThomas Huth 	int rc, cpuflags;
1267e168bf8dSCarsten Otte 
12683c038e6bSDominik Dingel 	/*
12693c038e6bSDominik Dingel 	 * On s390 notifications for arriving pages will be delivered directly
12703c038e6bSDominik Dingel 	 * to the guest, but the housekeeping for completed pfaults is
12713c038e6bSDominik Dingel 	 * handled outside the worker.
12723c038e6bSDominik Dingel 	 */
12733c038e6bSDominik Dingel 	kvm_check_async_pf_completion(vcpu);
12743c038e6bSDominik Dingel 
12755a32c1afSChristian Borntraeger 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
1276b0c632dbSHeiko Carstens 
1277b0c632dbSHeiko Carstens 	if (need_resched())
1278b0c632dbSHeiko Carstens 		schedule();
1279b0c632dbSHeiko Carstens 
1280d3a73acbSMartin Schwidefsky 	if (test_cpu_flag(CIF_MCCK_PENDING))
128171cde587SChristian Borntraeger 		s390_handle_mcck();
128271cde587SChristian Borntraeger 
128379395031SJens Freimann 	if (!kvm_is_ucontrol(vcpu->kvm)) {
128479395031SJens Freimann 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
128579395031SJens Freimann 		if (rc)
128679395031SJens Freimann 			return rc;
128779395031SJens Freimann 	}
12880ff31867SCarsten Otte 
12892c70fe44SChristian Borntraeger 	rc = kvm_s390_handle_requests(vcpu);
12902c70fe44SChristian Borntraeger 	if (rc)
12912c70fe44SChristian Borntraeger 		return rc;
12922c70fe44SChristian Borntraeger 
129327291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu)) {
129427291e21SDavid Hildenbrand 		kvm_s390_backup_guest_per_regs(vcpu);
129527291e21SDavid Hildenbrand 		kvm_s390_patch_guest_per_regs(vcpu);
129627291e21SDavid Hildenbrand 	}
129727291e21SDavid Hildenbrand 
1298b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icptcode = 0;
12993fb4c40fSThomas Huth 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
13003fb4c40fSThomas Huth 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
13013fb4c40fSThomas Huth 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
13022b29a9fdSDominik Dingel 
13033fb4c40fSThomas Huth 	return 0;
13043fb4c40fSThomas Huth }
13053fb4c40fSThomas Huth 
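/*
 * Descriptive note (added): map the SIE exit back to an action. A
 * non-negative exit_reason is handled via the intercept handlers, ucontrol
 * guests exit to userspace with -EREMOTE, guest page faults are resolved
 * (asynchronously if possible), and anything else is treated as a fault in
 * the SIE instruction itself.
 */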
13063fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
13073fb4c40fSThomas Huth {
130824eb3a82SDominik Dingel 	int rc = -1;
13092b29a9fdSDominik Dingel 
13102b29a9fdSDominik Dingel 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
13112b29a9fdSDominik Dingel 		   vcpu->arch.sie_block->icptcode);
13122b29a9fdSDominik Dingel 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
13132b29a9fdSDominik Dingel 
131427291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu))
131527291e21SDavid Hildenbrand 		kvm_s390_restore_guest_per_regs(vcpu);
131627291e21SDavid Hildenbrand 
13173fb4c40fSThomas Huth 	if (exit_reason >= 0) {
13187c470539SMartin Schwidefsky 		rc = 0;
1319210b1607SThomas Huth 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
1320210b1607SThomas Huth 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
1321210b1607SThomas Huth 		vcpu->run->s390_ucontrol.trans_exc_code =
1322210b1607SThomas Huth 						current->thread.gmap_addr;
1323210b1607SThomas Huth 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
1324210b1607SThomas Huth 		rc = -EREMOTE;
132524eb3a82SDominik Dingel 
132624eb3a82SDominik Dingel 	} else if (current->thread.gmap_pfault) {
13273c038e6bSDominik Dingel 		trace_kvm_s390_major_guest_pfault(vcpu);
132824eb3a82SDominik Dingel 		current->thread.gmap_pfault = 0;
1329fa576c58SThomas Huth 		if (kvm_arch_setup_async_pf(vcpu)) {
133024eb3a82SDominik Dingel 			rc = 0;
1331fa576c58SThomas Huth 		} else {
1332fa576c58SThomas Huth 			gpa_t gpa = current->thread.gmap_addr;
1333fa576c58SThomas Huth 			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
1334fa576c58SThomas Huth 		}
133524eb3a82SDominik Dingel 	}
133624eb3a82SDominik Dingel 
133724eb3a82SDominik Dingel 	if (rc == -1) {
1338699bde3bSChristian Borntraeger 		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
1339699bde3bSChristian Borntraeger 		trace_kvm_s390_sie_fault(vcpu);
1340699bde3bSChristian Borntraeger 		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
13411f0d0f09SCarsten Otte 	}
1342b0c632dbSHeiko Carstens 
13435a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
13443fb4c40fSThomas Huth 
1345a76ccff6SThomas Huth 	if (rc == 0) {
1346a76ccff6SThomas Huth 		if (kvm_is_ucontrol(vcpu->kvm))
13472955c83fSChristian Borntraeger 			/* Don't exit for host interrupts. */
13482955c83fSChristian Borntraeger 			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
1349a76ccff6SThomas Huth 		else
1350a76ccff6SThomas Huth 			rc = kvm_handle_sie_intercept(vcpu);
1351a76ccff6SThomas Huth 	}
1352a76ccff6SThomas Huth 
13533fb4c40fSThomas Huth 	return rc;
13543fb4c40fSThomas Huth }
13553fb4c40fSThomas Huth 
13563fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu)
13573fb4c40fSThomas Huth {
13583fb4c40fSThomas Huth 	int rc, exit_reason;
13593fb4c40fSThomas Huth 
1360800c1065SThomas Huth 	/*
1361800c1065SThomas Huth 	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
1362800c1065SThomas Huth 	 * ning the guest), so that memslots (and other stuff) are protected
1363800c1065SThomas Huth 	 */
1364800c1065SThomas Huth 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1365800c1065SThomas Huth 
1366a76ccff6SThomas Huth 	do {
13673fb4c40fSThomas Huth 		rc = vcpu_pre_run(vcpu);
13683fb4c40fSThomas Huth 		if (rc)
1369a76ccff6SThomas Huth 			break;
13703fb4c40fSThomas Huth 
1371800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
13723fb4c40fSThomas Huth 		/*
1373a76ccff6SThomas Huth 		 * As PF_VCPU will be used in the fault handler, there must be
1374a76ccff6SThomas Huth 		 * no uaccess between guest_enter and guest_exit.
13753fb4c40fSThomas Huth 		 */
13763fb4c40fSThomas Huth 		preempt_disable();
13773fb4c40fSThomas Huth 		kvm_guest_enter();
13783fb4c40fSThomas Huth 		preempt_enable();
1379a76ccff6SThomas Huth 		exit_reason = sie64a(vcpu->arch.sie_block,
1380a76ccff6SThomas Huth 				     vcpu->run->s.regs.gprs);
13813fb4c40fSThomas Huth 		kvm_guest_exit();
1382800c1065SThomas Huth 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
13833fb4c40fSThomas Huth 
13843fb4c40fSThomas Huth 		rc = vcpu_post_run(vcpu, exit_reason);
138527291e21SDavid Hildenbrand 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
13863fb4c40fSThomas Huth 
1387800c1065SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1388e168bf8dSCarsten Otte 	return rc;
1389b0c632dbSHeiko Carstens }
1390b0c632dbSHeiko Carstens 
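/*
 * Descriptive note (added): sync_regs/store_regs shuffle the register state
 * shared with userspace through kvm_run. sync_regs applies the fields marked
 * in kvm_dirty_regs before entering the guest; store_regs copies the current
 * state back after __vcpu_run returns.
 */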
1391b028ee3eSDavid Hildenbrand static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1392b028ee3eSDavid Hildenbrand {
1393b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
1394b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
1395b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
1396b028ee3eSDavid Hildenbrand 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
1397b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
1398b028ee3eSDavid Hildenbrand 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
1399d3d692c8SDavid Hildenbrand 		/* some control register changes require a tlb flush */
1400d3d692c8SDavid Hildenbrand 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1401b028ee3eSDavid Hildenbrand 	}
1402b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
1403b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
1404b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
1405b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
1406b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
1407b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
1408b028ee3eSDavid Hildenbrand 	}
1409b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
1410b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
1411b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
1412b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
1413*9fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1414*9fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
1415b028ee3eSDavid Hildenbrand 	}
1416b028ee3eSDavid Hildenbrand 	kvm_run->kvm_dirty_regs = 0;
1417b028ee3eSDavid Hildenbrand }
1418b028ee3eSDavid Hildenbrand 
1419b028ee3eSDavid Hildenbrand static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1420b028ee3eSDavid Hildenbrand {
1421b028ee3eSDavid Hildenbrand 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
1422b028ee3eSDavid Hildenbrand 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
1423b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
1424b028ee3eSDavid Hildenbrand 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
1425b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
1426b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
1427b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
1428b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
1429b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
1430b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
1431b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
1432b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
1433b028ee3eSDavid Hildenbrand }
1434b028ee3eSDavid Hildenbrand 
1435b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1436b0c632dbSHeiko Carstens {
14378f2abe6aSChristian Borntraeger 	int rc;
1438b0c632dbSHeiko Carstens 	sigset_t sigsaved;
1439b0c632dbSHeiko Carstens 
144027291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu)) {
144127291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
144227291e21SDavid Hildenbrand 		return 0;
144327291e21SDavid Hildenbrand 	}
144427291e21SDavid Hildenbrand 
1445b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
1446b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1447b0c632dbSHeiko Carstens 
14486352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
14496852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
14506352e4d2SDavid Hildenbrand 	} else if (is_vcpu_stopped(vcpu)) {
14516352e4d2SDavid Hildenbrand 		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
14526352e4d2SDavid Hildenbrand 				   vcpu->vcpu_id);
14536352e4d2SDavid Hildenbrand 		return -EINVAL;
14546352e4d2SDavid Hildenbrand 	}
1455b0c632dbSHeiko Carstens 
1456b028ee3eSDavid Hildenbrand 	sync_regs(vcpu, kvm_run);
1457d7b0b5ebSCarsten Otte 
1458dab4079dSHeiko Carstens 	might_fault();
1459e168bf8dSCarsten Otte 	rc = __vcpu_run(vcpu);
14609ace903dSChristian Ehrhardt 
1461b1d16c49SChristian Ehrhardt 	if (signal_pending(current) && !rc) {
1462b1d16c49SChristian Ehrhardt 		kvm_run->exit_reason = KVM_EXIT_INTR;
14638f2abe6aSChristian Borntraeger 		rc = -EINTR;
1464b1d16c49SChristian Ehrhardt 	}
14658f2abe6aSChristian Borntraeger 
146627291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu) && !rc)  {
146727291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
146827291e21SDavid Hildenbrand 		rc = 0;
146927291e21SDavid Hildenbrand 	}
147027291e21SDavid Hildenbrand 
1471b8e660b8SHeiko Carstens 	if (rc == -EOPNOTSUPP) {
14728f2abe6aSChristian Borntraeger 		/* intercept cannot be handled in-kernel, prepare kvm-run */
14738f2abe6aSChristian Borntraeger 		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
14748f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
14758f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
14768f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
14778f2abe6aSChristian Borntraeger 		rc = 0;
14788f2abe6aSChristian Borntraeger 	}
14798f2abe6aSChristian Borntraeger 
14808f2abe6aSChristian Borntraeger 	if (rc == -EREMOTE) {
14818f2abe6aSChristian Borntraeger 		/* intercept was handled, but userspace support is needed;
14828f2abe6aSChristian Borntraeger 		 * kvm_run has been prepared by the handler */
14838f2abe6aSChristian Borntraeger 		rc = 0;
14848f2abe6aSChristian Borntraeger 	}
14858f2abe6aSChristian Borntraeger 
1486b028ee3eSDavid Hildenbrand 	store_regs(vcpu, kvm_run);
1487d7b0b5ebSCarsten Otte 
1488b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
1489b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1490b0c632dbSHeiko Carstens 
1491b0c632dbSHeiko Carstens 	vcpu->stat.exit_userspace++;
14927e8e6ab4SHeiko Carstens 	return rc;
1493b0c632dbSHeiko Carstens }
1494b0c632dbSHeiko Carstens 
1495b0c632dbSHeiko Carstens /*
1496b0c632dbSHeiko Carstens  * store status at address
1497b0c632dbSHeiko Carstens  * we have two special cases:
1498b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
1499b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
1500b0c632dbSHeiko Carstens  */
1501d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
1502b0c632dbSHeiko Carstens {
1503092670cdSCarsten Otte 	unsigned char archmode = 1;
1504fda902cbSMichael Mueller 	unsigned int px;
1505178bd789SThomas Huth 	u64 clkcomp;
1506d0bce605SHeiko Carstens 	int rc;
1507b0c632dbSHeiko Carstens 
1508d0bce605SHeiko Carstens 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
1509d0bce605SHeiko Carstens 		if (write_guest_abs(vcpu, 163, &archmode, 1))
1510b0c632dbSHeiko Carstens 			return -EFAULT;
1511d0bce605SHeiko Carstens 		gpa = SAVE_AREA_BASE;
1512d0bce605SHeiko Carstens 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
1513d0bce605SHeiko Carstens 		if (write_guest_real(vcpu, 163, &archmode, 1))
1514b0c632dbSHeiko Carstens 			return -EFAULT;
1515d0bce605SHeiko Carstens 		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
1516d0bce605SHeiko Carstens 	}
1517d0bce605SHeiko Carstens 	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
1518d0bce605SHeiko Carstens 			     vcpu->arch.guest_fpregs.fprs, 128);
1519d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
1520d0bce605SHeiko Carstens 			      vcpu->run->s.regs.gprs, 128);
1521d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
1522d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gpsw, 16);
1523fda902cbSMichael Mueller 	px = kvm_s390_get_prefix(vcpu);
1524d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
1525fda902cbSMichael Mueller 			      &px, 4);
1526d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu,
1527d0bce605SHeiko Carstens 			      gpa + offsetof(struct save_area, fp_ctrl_reg),
1528d0bce605SHeiko Carstens 			      &vcpu->arch.guest_fpregs.fpc, 4);
1529d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
1530d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->todpr, 4);
1531d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
1532d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->cputm, 8);
1533178bd789SThomas Huth 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
1534d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
1535d0bce605SHeiko Carstens 			      &clkcomp, 8);
1536d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
1537d0bce605SHeiko Carstens 			      &vcpu->run->s.regs.acrs, 64);
1538d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
1539d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gcr, 128);
1540d0bce605SHeiko Carstens 	return rc ? -EFAULT : 0;
1541b0c632dbSHeiko Carstens }
1542b0c632dbSHeiko Carstens 
1543e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
1544e879892cSThomas Huth {
1545e879892cSThomas Huth 	/*
1546e879892cSThomas Huth 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
1547e879892cSThomas Huth 	 * copying in vcpu load/put. Let's update our copies before we save
1548e879892cSThomas Huth 	 * them into the save area.
1549e879892cSThomas Huth 	 */
1550e879892cSThomas Huth 	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
1551e879892cSThomas Huth 	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
1552e879892cSThomas Huth 	save_access_regs(vcpu->run->s.regs.acrs);
1553e879892cSThomas Huth 
1554e879892cSThomas Huth 	return kvm_s390_store_status_unloaded(vcpu, addr);
1555e879892cSThomas Huth }
1556e879892cSThomas Huth 
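/*
 * Descriptive note (added): the IBS helpers queue an ENABLE/DISABLE request
 * (dropping a stale opposite request first) and kick the VCPU out of SIE so
 * the request is processed in kvm_s390_handle_requests before the next
 * guest entry.
 */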
15578ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
15588ad35755SDavid Hildenbrand {
15598ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
15608ad35755SDavid Hildenbrand 	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
15618ad35755SDavid Hildenbrand 	exit_sie_sync(vcpu);
15628ad35755SDavid Hildenbrand }
15638ad35755SDavid Hildenbrand 
15648ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
15658ad35755SDavid Hildenbrand {
15668ad35755SDavid Hildenbrand 	unsigned int i;
15678ad35755SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
15688ad35755SDavid Hildenbrand 
15698ad35755SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
15708ad35755SDavid Hildenbrand 		__disable_ibs_on_vcpu(vcpu);
15718ad35755SDavid Hildenbrand 	}
15728ad35755SDavid Hildenbrand }
15738ad35755SDavid Hildenbrand 
15748ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
15758ad35755SDavid Hildenbrand {
15768ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
15778ad35755SDavid Hildenbrand 	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
15788ad35755SDavid Hildenbrand 	exit_sie_sync(vcpu);
15798ad35755SDavid Hildenbrand }
15808ad35755SDavid Hildenbrand 
15816852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
15826852d7b6SDavid Hildenbrand {
15838ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
15848ad35755SDavid Hildenbrand 
15858ad35755SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
15868ad35755SDavid Hildenbrand 		return;
15878ad35755SDavid Hildenbrand 
15886852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
15898ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
1590433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
15918ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
15928ad35755SDavid Hildenbrand 
15938ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
15948ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
15958ad35755SDavid Hildenbrand 			started_vcpus++;
15968ad35755SDavid Hildenbrand 	}
15978ad35755SDavid Hildenbrand 
15988ad35755SDavid Hildenbrand 	if (started_vcpus == 0) {
15998ad35755SDavid Hildenbrand 		/* we're the only active VCPU -> speed it up */
16008ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(vcpu);
16018ad35755SDavid Hildenbrand 	} else if (started_vcpus == 1) {
16028ad35755SDavid Hildenbrand 		/*
16038ad35755SDavid Hildenbrand 		 * As we are starting a second VCPU, we have to disable
16048ad35755SDavid Hildenbrand 		 * the IBS facility on all VCPUs to remove potentially
16058ad35755SDavid Hildenbrand 		 * outstanding ENABLE requests.
16068ad35755SDavid Hildenbrand 		 */
16078ad35755SDavid Hildenbrand 		__disable_ibs_on_all_vcpus(vcpu->kvm);
16088ad35755SDavid Hildenbrand 	}
16098ad35755SDavid Hildenbrand 
16106852d7b6SDavid Hildenbrand 	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
16118ad35755SDavid Hildenbrand 	/*
16128ad35755SDavid Hildenbrand 	 * Another VCPU might have used IBS while we were offline.
16138ad35755SDavid Hildenbrand 	 * Let's play safe and flush the VCPU at startup.
16148ad35755SDavid Hildenbrand 	 */
1615d3d692c8SDavid Hildenbrand 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1616433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
16178ad35755SDavid Hildenbrand 	return;
16186852d7b6SDavid Hildenbrand }
16196852d7b6SDavid Hildenbrand 
16206852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
16216852d7b6SDavid Hildenbrand {
16228ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
16238ad35755SDavid Hildenbrand 	struct kvm_vcpu *started_vcpu = NULL;
16248ad35755SDavid Hildenbrand 
16258ad35755SDavid Hildenbrand 	if (is_vcpu_stopped(vcpu))
16268ad35755SDavid Hildenbrand 		return;
16278ad35755SDavid Hildenbrand 
16286852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
16298ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
1630433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
16318ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
16328ad35755SDavid Hildenbrand 
163332f5ff63SDavid Hildenbrand 	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
16346cddd432SDavid Hildenbrand 	kvm_s390_clear_stop_irq(vcpu);
163532f5ff63SDavid Hildenbrand 
16366cddd432SDavid Hildenbrand 	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
16378ad35755SDavid Hildenbrand 	__disable_ibs_on_vcpu(vcpu);
16388ad35755SDavid Hildenbrand 
16398ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
16408ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
16418ad35755SDavid Hildenbrand 			started_vcpus++;
16428ad35755SDavid Hildenbrand 			started_vcpu = vcpu->kvm->vcpus[i];
16438ad35755SDavid Hildenbrand 		}
16448ad35755SDavid Hildenbrand 	}
16458ad35755SDavid Hildenbrand 
16468ad35755SDavid Hildenbrand 	if (started_vcpus == 1) {
16478ad35755SDavid Hildenbrand 		/*
16488ad35755SDavid Hildenbrand 		 * As we only have one VCPU left, we want to enable the
16498ad35755SDavid Hildenbrand 		 * IBS facility for that VCPU to speed it up.
16508ad35755SDavid Hildenbrand 		 */
16518ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(started_vcpu);
16528ad35755SDavid Hildenbrand 	}
16538ad35755SDavid Hildenbrand 
1654433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
16558ad35755SDavid Hildenbrand 	return;
16566852d7b6SDavid Hildenbrand }
16576852d7b6SDavid Hildenbrand 
1658d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1659d6712df9SCornelia Huck 				     struct kvm_enable_cap *cap)
1660d6712df9SCornelia Huck {
1661d6712df9SCornelia Huck 	int r;
1662d6712df9SCornelia Huck 
1663d6712df9SCornelia Huck 	if (cap->flags)
1664d6712df9SCornelia Huck 		return -EINVAL;
1665d6712df9SCornelia Huck 
1666d6712df9SCornelia Huck 	switch (cap->cap) {
1667fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
1668fa6b7fe9SCornelia Huck 		if (!vcpu->kvm->arch.css_support) {
1669fa6b7fe9SCornelia Huck 			vcpu->kvm->arch.css_support = 1;
1670fa6b7fe9SCornelia Huck 			trace_kvm_s390_enable_css(vcpu->kvm);
1671fa6b7fe9SCornelia Huck 		}
1672fa6b7fe9SCornelia Huck 		r = 0;
1673fa6b7fe9SCornelia Huck 		break;
1674d6712df9SCornelia Huck 	default:
1675d6712df9SCornelia Huck 		r = -EINVAL;
1676d6712df9SCornelia Huck 		break;
1677d6712df9SCornelia Huck 	}
1678d6712df9SCornelia Huck 	return r;
1679d6712df9SCornelia Huck }
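
/*
 * Illustrative userspace sketch (added, not part of this file): enabling the
 * CSS support capability on a vcpu fd, assuming <linux/kvm.h>:
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_CSS_SUPPORT };
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */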
1680d6712df9SCornelia Huck 
1681b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp,
1682b0c632dbSHeiko Carstens 			 unsigned int ioctl, unsigned long arg)
1683b0c632dbSHeiko Carstens {
1684b0c632dbSHeiko Carstens 	struct kvm_vcpu *vcpu = filp->private_data;
1685b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
1686800c1065SThomas Huth 	int idx;
1687bc923cc9SAvi Kivity 	long r;
1688b0c632dbSHeiko Carstens 
168993736624SAvi Kivity 	switch (ioctl) {
169093736624SAvi Kivity 	case KVM_S390_INTERRUPT: {
1691ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
1692383d0b05SJens Freimann 		struct kvm_s390_irq s390irq;
1693ba5c1e9bSCarsten Otte 
169493736624SAvi Kivity 		r = -EFAULT;
1695ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
169693736624SAvi Kivity 			break;
1697383d0b05SJens Freimann 		if (s390int_to_s390irq(&s390int, &s390irq))
1698383d0b05SJens Freimann 			return -EINVAL;
1699383d0b05SJens Freimann 		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
170093736624SAvi Kivity 		break;
1701ba5c1e9bSCarsten Otte 	}
1702b0c632dbSHeiko Carstens 	case KVM_S390_STORE_STATUS:
1703800c1065SThomas Huth 		idx = srcu_read_lock(&vcpu->kvm->srcu);
1704bc923cc9SAvi Kivity 		r = kvm_s390_vcpu_store_status(vcpu, arg);
1705800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
1706bc923cc9SAvi Kivity 		break;
1707b0c632dbSHeiko Carstens 	case KVM_S390_SET_INITIAL_PSW: {
1708b0c632dbSHeiko Carstens 		psw_t psw;
1709b0c632dbSHeiko Carstens 
1710bc923cc9SAvi Kivity 		r = -EFAULT;
1711b0c632dbSHeiko Carstens 		if (copy_from_user(&psw, argp, sizeof(psw)))
1712bc923cc9SAvi Kivity 			break;
1713bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
1714bc923cc9SAvi Kivity 		break;
1715b0c632dbSHeiko Carstens 	}
1716b0c632dbSHeiko Carstens 	case KVM_S390_INITIAL_RESET:
1717bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
1718bc923cc9SAvi Kivity 		break;
171914eebd91SCarsten Otte 	case KVM_SET_ONE_REG:
172014eebd91SCarsten Otte 	case KVM_GET_ONE_REG: {
172114eebd91SCarsten Otte 		struct kvm_one_reg reg;
172214eebd91SCarsten Otte 		r = -EFAULT;
172314eebd91SCarsten Otte 		if (copy_from_user(&reg, argp, sizeof(reg)))
172414eebd91SCarsten Otte 			break;
172514eebd91SCarsten Otte 		if (ioctl == KVM_SET_ONE_REG)
172614eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
172714eebd91SCarsten Otte 		else
172814eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
172914eebd91SCarsten Otte 		break;
173014eebd91SCarsten Otte 	}
173127e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
173227e0393fSCarsten Otte 	case KVM_S390_UCAS_MAP: {
173327e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
173427e0393fSCarsten Otte 
173527e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
173627e0393fSCarsten Otte 			r = -EFAULT;
173727e0393fSCarsten Otte 			break;
173827e0393fSCarsten Otte 		}
173927e0393fSCarsten Otte 
174027e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
174127e0393fSCarsten Otte 			r = -EINVAL;
174227e0393fSCarsten Otte 			break;
174327e0393fSCarsten Otte 		}
174427e0393fSCarsten Otte 
174527e0393fSCarsten Otte 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
174627e0393fSCarsten Otte 				     ucasmap.vcpu_addr, ucasmap.length);
174727e0393fSCarsten Otte 		break;
174827e0393fSCarsten Otte 	}
174927e0393fSCarsten Otte 	case KVM_S390_UCAS_UNMAP: {
175027e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
175127e0393fSCarsten Otte 
175227e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
175327e0393fSCarsten Otte 			r = -EFAULT;
175427e0393fSCarsten Otte 			break;
175527e0393fSCarsten Otte 		}
175627e0393fSCarsten Otte 
175727e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
175827e0393fSCarsten Otte 			r = -EINVAL;
175927e0393fSCarsten Otte 			break;
176027e0393fSCarsten Otte 		}
176127e0393fSCarsten Otte 
176227e0393fSCarsten Otte 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
176327e0393fSCarsten Otte 			ucasmap.length);
176427e0393fSCarsten Otte 		break;
176527e0393fSCarsten Otte 	}
176627e0393fSCarsten Otte #endif
1767ccc7910fSCarsten Otte 	case KVM_S390_VCPU_FAULT: {
1768527e30b4SMartin Schwidefsky 		r = gmap_fault(vcpu->arch.gmap, arg, 0);
1769ccc7910fSCarsten Otte 		break;
1770ccc7910fSCarsten Otte 	}
1771d6712df9SCornelia Huck 	case KVM_ENABLE_CAP:
1772d6712df9SCornelia Huck 	{
1773d6712df9SCornelia Huck 		struct kvm_enable_cap cap;
1774d6712df9SCornelia Huck 		r = -EFAULT;
1775d6712df9SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
1776d6712df9SCornelia Huck 			break;
1777d6712df9SCornelia Huck 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1778d6712df9SCornelia Huck 		break;
1779d6712df9SCornelia Huck 	}
1780b0c632dbSHeiko Carstens 	default:
17813e6afcf1SCarsten Otte 		r = -ENOTTY;
1782b0c632dbSHeiko Carstens 	}
1783bc923cc9SAvi Kivity 	return r;
1784b0c632dbSHeiko Carstens }
1785b0c632dbSHeiko Carstens 
17865b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
17875b1c1493SCarsten Otte {
17885b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
17895b1c1493SCarsten Otte 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
17905b1c1493SCarsten Otte 		 && (kvm_is_ucontrol(vcpu->kvm))) {
17915b1c1493SCarsten Otte 		vmf->page = virt_to_page(vcpu->arch.sie_block);
17925b1c1493SCarsten Otte 		get_page(vmf->page);
17935b1c1493SCarsten Otte 		return 0;
17945b1c1493SCarsten Otte 	}
17955b1c1493SCarsten Otte #endif
17965b1c1493SCarsten Otte 	return VM_FAULT_SIGBUS;
17975b1c1493SCarsten Otte }
17985b1c1493SCarsten Otte 
17995587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
18005587027cSAneesh Kumar K.V 			    unsigned long npages)
1801db3fe4ebSTakuya Yoshikawa {
1802db3fe4ebSTakuya Yoshikawa 	return 0;
1803db3fe4ebSTakuya Yoshikawa }
1804db3fe4ebSTakuya Yoshikawa 
1805b0c632dbSHeiko Carstens /* Section: memory related */
1806f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
1807f7784b8eSMarcelo Tosatti 				   struct kvm_memory_slot *memslot,
18087b6195a9STakuya Yoshikawa 				   struct kvm_userspace_memory_region *mem,
18097b6195a9STakuya Yoshikawa 				   enum kvm_mr_change change)
1810b0c632dbSHeiko Carstens {
1811dd2887e7SNick Wang 	/* A few sanity checks. Memory slots have to start and end on a
1812dd2887e7SNick Wang 	   segment boundary (1MB). The memory in userland may be fragmented
1813dd2887e7SNick Wang 	   into various different vmas. It is okay to mmap() and munmap()
1814dd2887e7SNick Wang 	   stuff in this slot at any time after doing this call. */
1815b0c632dbSHeiko Carstens 
1816598841caSCarsten Otte 	if (mem->userspace_addr & 0xffffful)
1817b0c632dbSHeiko Carstens 		return -EINVAL;
1818b0c632dbSHeiko Carstens 
1819598841caSCarsten Otte 	if (mem->memory_size & 0xffffful)
1820b0c632dbSHeiko Carstens 		return -EINVAL;
1821b0c632dbSHeiko Carstens 
1822f7784b8eSMarcelo Tosatti 	return 0;
1823f7784b8eSMarcelo Tosatti }
1824f7784b8eSMarcelo Tosatti 
1825f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
1826f7784b8eSMarcelo Tosatti 				struct kvm_userspace_memory_region *mem,
18278482644aSTakuya Yoshikawa 				const struct kvm_memory_slot *old,
18288482644aSTakuya Yoshikawa 				enum kvm_mr_change change)
1829f7784b8eSMarcelo Tosatti {
1830f7850c92SCarsten Otte 	int rc;
1831f7784b8eSMarcelo Tosatti 
18322cef4debSChristian Borntraeger 	/* If the basics of the memslot do not change, we do not want
18332cef4debSChristian Borntraeger 	 * to update the gmap. Every update causes several unnecessary
18342cef4debSChristian Borntraeger 	 * segment translation exceptions. This is usually handled just
18352cef4debSChristian Borntraeger 	 * fine by the normal fault handler + gmap, but it will also
18362cef4debSChristian Borntraeger 	 * cause faults on the prefix page of running guest CPUs.
18372cef4debSChristian Borntraeger 	 */
18382cef4debSChristian Borntraeger 	if (old->userspace_addr == mem->userspace_addr &&
18392cef4debSChristian Borntraeger 	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
18402cef4debSChristian Borntraeger 	    old->npages * PAGE_SIZE == mem->memory_size)
18412cef4debSChristian Borntraeger 		return;
1842598841caSCarsten Otte 
1843598841caSCarsten Otte 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
1844598841caSCarsten Otte 		mem->guest_phys_addr, mem->memory_size);
1845598841caSCarsten Otte 	if (rc)
1846f7850c92SCarsten Otte 		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
1847598841caSCarsten Otte 	return;
1848b0c632dbSHeiko Carstens }
1849b0c632dbSHeiko Carstens 
1850b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
1851b0c632dbSHeiko Carstens {
1852ef50f7acSChristian Borntraeger 	int ret;
18530ee75beaSAvi Kivity 	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1854ef50f7acSChristian Borntraeger 	if (ret)
1855ef50f7acSChristian Borntraeger 		return ret;
1856ef50f7acSChristian Borntraeger 
1857ef50f7acSChristian Borntraeger 	/*
1858ef50f7acSChristian Borntraeger 	 * guests can ask for up to 255+1 double words, so we need a full page
185925985edcSLucas De Marchi 	 * to hold the maximum amount of facilities. On the other hand, we
1860ef50f7acSChristian Borntraeger 	 * only set facilities that are known to work in KVM.
1861ef50f7acSChristian Borntraeger 	 */
186278c4b59fSMichael Mueller 	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
186378c4b59fSMichael Mueller 	if (!vfacilities) {
1864ef50f7acSChristian Borntraeger 		kvm_exit();
1865ef50f7acSChristian Borntraeger 		return -ENOMEM;
1866ef50f7acSChristian Borntraeger 	}
186778c4b59fSMichael Mueller 	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
18687be81a46SChristian Borntraeger 	vfacilities[0] &= 0xff82fffbf47c2000UL;
18697feb6bb8SMichael Mueller 	vfacilities[1] &= 0x005c000000000000UL;
1870ef50f7acSChristian Borntraeger 	return 0;
1871b0c632dbSHeiko Carstens }
1872b0c632dbSHeiko Carstens 
1873b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void)
1874b0c632dbSHeiko Carstens {
187578c4b59fSMichael Mueller 	free_page((unsigned long) vfacilities);
1876b0c632dbSHeiko Carstens 	kvm_exit();
1877b0c632dbSHeiko Carstens }
1878b0c632dbSHeiko Carstens 
1879b0c632dbSHeiko Carstens module_init(kvm_s390_init);
1880b0c632dbSHeiko Carstens module_exit(kvm_s390_exit);
1881566af940SCornelia Huck 
1882566af940SCornelia Huck /*
1883566af940SCornelia Huck  * Enable autoloading of the kvm module.
1884566af940SCornelia Huck  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
1885566af940SCornelia Huck  * since x86 takes a different approach.
1886566af940SCornelia Huck  */
1887566af940SCornelia Huck #include <linux/miscdevice.h>
1888566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR);
1889566af940SCornelia Huck MODULE_ALIAS("devname:kvm");
1890