xref: /openbmc/linux/arch/s390/kvm/kvm-s390.c (revision 2444b352c3acf54897b0e2803a7c4e66699f9f43)
1b0c632dbSHeiko Carstens /*
2a53c8fabSHeiko Carstens  * hosting zSeries kernel virtual machines
3b0c632dbSHeiko Carstens  *
4628eb9b8SChristian Ehrhardt  * Copyright IBM Corp. 2008, 2009
5b0c632dbSHeiko Carstens  *
6b0c632dbSHeiko Carstens  * This program is free software; you can redistribute it and/or modify
7b0c632dbSHeiko Carstens  * it under the terms of the GNU General Public License (version 2 only)
8b0c632dbSHeiko Carstens  * as published by the Free Software Foundation.
9b0c632dbSHeiko Carstens  *
10b0c632dbSHeiko Carstens  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11b0c632dbSHeiko Carstens  *               Christian Borntraeger <borntraeger@de.ibm.com>
12b0c632dbSHeiko Carstens  *               Heiko Carstens <heiko.carstens@de.ibm.com>
13628eb9b8SChristian Ehrhardt  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
1415f36ebdSJason J. Herne  *               Jason J. Herne <jjherne@us.ibm.com>
15b0c632dbSHeiko Carstens  */
16b0c632dbSHeiko Carstens 
17b0c632dbSHeiko Carstens #include <linux/compiler.h>
18b0c632dbSHeiko Carstens #include <linux/err.h>
19b0c632dbSHeiko Carstens #include <linux/fs.h>
20ca872302SChristian Borntraeger #include <linux/hrtimer.h>
21b0c632dbSHeiko Carstens #include <linux/init.h>
22b0c632dbSHeiko Carstens #include <linux/kvm.h>
23b0c632dbSHeiko Carstens #include <linux/kvm_host.h>
24b0c632dbSHeiko Carstens #include <linux/module.h>
25b0c632dbSHeiko Carstens #include <linux/slab.h>
26ba5c1e9bSCarsten Otte #include <linux/timer.h>
27cbb870c8SHeiko Carstens #include <asm/asm-offsets.h>
28b0c632dbSHeiko Carstens #include <asm/lowcore.h>
29b0c632dbSHeiko Carstens #include <asm/pgtable.h>
30f5daba1dSHeiko Carstens #include <asm/nmi.h>
31a0616cdeSDavid Howells #include <asm/switch_to.h>
3278c4b59fSMichael Mueller #include <asm/facility.h>
331526bf9cSChristian Borntraeger #include <asm/sclp.h>
348f2abe6aSChristian Borntraeger #include "kvm-s390.h"
35b0c632dbSHeiko Carstens #include "gaccess.h"
36b0c632dbSHeiko Carstens 
375786fffaSCornelia Huck #define CREATE_TRACE_POINTS
385786fffaSCornelia Huck #include "trace.h"
39ade38c31SCornelia Huck #include "trace-s390.h"
405786fffaSCornelia Huck 
41b0c632dbSHeiko Carstens #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
42b0c632dbSHeiko Carstens 
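/*
 * Per-vcpu statistics exposed via debugfs.  Each entry maps a counter
 * in struct kvm_vcpu_stat (located through the VCPU_STAT() offset
 * macro above) to a file name in the kvm debugfs directory.
 */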
43b0c632dbSHeiko Carstens struct kvm_stats_debugfs_item debugfs_entries[] = {
44b0c632dbSHeiko Carstens 	{ "userspace_handled", VCPU_STAT(exit_userspace) },
450eaeafa1SChristian Borntraeger 	{ "exit_null", VCPU_STAT(exit_null) },
468f2abe6aSChristian Borntraeger 	{ "exit_validity", VCPU_STAT(exit_validity) },
478f2abe6aSChristian Borntraeger 	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
488f2abe6aSChristian Borntraeger 	{ "exit_external_request", VCPU_STAT(exit_external_request) },
498f2abe6aSChristian Borntraeger 	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
50ba5c1e9bSCarsten Otte 	{ "exit_instruction", VCPU_STAT(exit_instruction) },
51ba5c1e9bSCarsten Otte 	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
52ba5c1e9bSCarsten Otte 	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
53ce2e4f0bSDavid Hildenbrand 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
54f5e10b09SChristian Borntraeger 	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
55ba5c1e9bSCarsten Otte 	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
56aba07508SDavid Hildenbrand 	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
57aba07508SDavid Hildenbrand 	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
58ba5c1e9bSCarsten Otte 	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
597697e71fSChristian Ehrhardt 	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
60ba5c1e9bSCarsten Otte 	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
61ba5c1e9bSCarsten Otte 	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
62ba5c1e9bSCarsten Otte 	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
63ba5c1e9bSCarsten Otte 	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
64ba5c1e9bSCarsten Otte 	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
65ba5c1e9bSCarsten Otte 	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
66ba5c1e9bSCarsten Otte 	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
6769d0d3a3SChristian Borntraeger 	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
68453423dcSChristian Borntraeger 	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
69453423dcSChristian Borntraeger 	{ "instruction_spx", VCPU_STAT(instruction_spx) },
70453423dcSChristian Borntraeger 	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
71453423dcSChristian Borntraeger 	{ "instruction_stap", VCPU_STAT(instruction_stap) },
72453423dcSChristian Borntraeger 	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
738a242234SHeiko Carstens 	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
74453423dcSChristian Borntraeger 	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
75453423dcSChristian Borntraeger 	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
76b31288faSKonstantin Weitz 	{ "instruction_essa", VCPU_STAT(instruction_essa) },
77453423dcSChristian Borntraeger 	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
78453423dcSChristian Borntraeger 	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
79bb25b9baSChristian Borntraeger 	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
805288fbf0SChristian Borntraeger 	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
81bd59d3a4SCornelia Huck 	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
827697e71fSChristian Ehrhardt 	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
835288fbf0SChristian Borntraeger 	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
8442cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
8542cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
865288fbf0SChristian Borntraeger 	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
8742cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
8842cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
895288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
905288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
915288fbf0SChristian Borntraeger 	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
9242cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
9342cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
9442cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
95388186bcSChristian Borntraeger 	{ "diagnose_10", VCPU_STAT(diagnose_10) },
96e28acfeaSChristian Borntraeger 	{ "diagnose_44", VCPU_STAT(diagnose_44) },
9741628d33SKonstantin Weitz 	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
98b0c632dbSHeiko Carstens 	{ NULL }
99b0c632dbSHeiko Carstens };
100b0c632dbSHeiko Carstens 
10178c4b59fSMichael Mueller unsigned long *vfacilities;
1022c70fe44SChristian Borntraeger static struct gmap_notifier gmap_notifier;
103b0c632dbSHeiko Carstens 
10478c4b59fSMichael Mueller /* test availability of vfacility */
105280ef0f1SHeiko Carstens int test_vfacility(unsigned long nr)
10678c4b59fSMichael Mueller {
10778c4b59fSMichael Mueller 	return __test_facility(nr, (void *) vfacilities);
10878c4b59fSMichael Mueller }
10978c4b59fSMichael Mueller 
110b0c632dbSHeiko Carstens /* Section: not file related */
11113a34e06SRadim Krčmář int kvm_arch_hardware_enable(void)
112b0c632dbSHeiko Carstens {
113b0c632dbSHeiko Carstens 	/* every s390 is virtualization enabled ;-) */
11410474ae8SAlexander Graf 	return 0;
115b0c632dbSHeiko Carstens }
116b0c632dbSHeiko Carstens 
1172c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
1182c70fe44SChristian Borntraeger 
119b0c632dbSHeiko Carstens int kvm_arch_hardware_setup(void)
120b0c632dbSHeiko Carstens {
1212c70fe44SChristian Borntraeger 	gmap_notifier.notifier_call = kvm_gmap_notifier;
1222c70fe44SChristian Borntraeger 	gmap_register_ipte_notifier(&gmap_notifier);
123b0c632dbSHeiko Carstens 	return 0;
124b0c632dbSHeiko Carstens }
125b0c632dbSHeiko Carstens 
126b0c632dbSHeiko Carstens void kvm_arch_hardware_unsetup(void)
127b0c632dbSHeiko Carstens {
1282c70fe44SChristian Borntraeger 	gmap_unregister_ipte_notifier(&gmap_notifier);
129b0c632dbSHeiko Carstens }
130b0c632dbSHeiko Carstens 
131b0c632dbSHeiko Carstens int kvm_arch_init(void *opaque)
132b0c632dbSHeiko Carstens {
13384877d93SCornelia Huck 	/* Register floating interrupt controller interface. */
13484877d93SCornelia Huck 	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
135b0c632dbSHeiko Carstens }
136b0c632dbSHeiko Carstens 
137b0c632dbSHeiko Carstens /* Section: device related */
138b0c632dbSHeiko Carstens long kvm_arch_dev_ioctl(struct file *filp,
139b0c632dbSHeiko Carstens 			unsigned int ioctl, unsigned long arg)
140b0c632dbSHeiko Carstens {
141b0c632dbSHeiko Carstens 	if (ioctl == KVM_S390_ENABLE_SIE)
142b0c632dbSHeiko Carstens 		return s390_enable_sie();
143b0c632dbSHeiko Carstens 	return -EINVAL;
144b0c632dbSHeiko Carstens }
145b0c632dbSHeiko Carstens 
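/*
 * Report which optional capabilities are available for this VM.  The
 * capabilities in the first group are always supported; the remaining
 * cases return limits (vcpus, memslots) or machine-dependent values.
 */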
146784aa3d7SAlexander Graf int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
147b0c632dbSHeiko Carstens {
148d7b0b5ebSCarsten Otte 	int r;
149d7b0b5ebSCarsten Otte 
1502bd0ac4eSCarsten Otte 	switch (ext) {
151d7b0b5ebSCarsten Otte 	case KVM_CAP_S390_PSW:
152b6cf8788SChristian Borntraeger 	case KVM_CAP_S390_GMAP:
15352e16b18SChristian Borntraeger 	case KVM_CAP_SYNC_MMU:
1541efd0f59SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
1551efd0f59SCarsten Otte 	case KVM_CAP_S390_UCONTROL:
1561efd0f59SCarsten Otte #endif
1573c038e6bSDominik Dingel 	case KVM_CAP_ASYNC_PF:
15860b413c9SChristian Borntraeger 	case KVM_CAP_SYNC_REGS:
15914eebd91SCarsten Otte 	case KVM_CAP_ONE_REG:
160d6712df9SCornelia Huck 	case KVM_CAP_ENABLE_CAP:
161fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
162ebc32262SCornelia Huck 	case KVM_CAP_IRQFD:
16310ccaa1eSCornelia Huck 	case KVM_CAP_IOEVENTFD:
164c05c4186SJens Freimann 	case KVM_CAP_DEVICE_CTRL:
165d938dc55SCornelia Huck 	case KVM_CAP_ENABLE_CAP_VM:
16678599d90SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
167f2061656SDominik Dingel 	case KVM_CAP_VM_ATTRIBUTES:
1686352e4d2SDavid Hildenbrand 	case KVM_CAP_MP_STATE:
169*2444b352SDavid Hildenbrand 	case KVM_CAP_S390_USER_SIGP:
170d7b0b5ebSCarsten Otte 		r = 1;
171d7b0b5ebSCarsten Otte 		break;
172e726b1bdSChristian Borntraeger 	case KVM_CAP_NR_VCPUS:
173e726b1bdSChristian Borntraeger 	case KVM_CAP_MAX_VCPUS:
174e726b1bdSChristian Borntraeger 		r = KVM_MAX_VCPUS;
175e726b1bdSChristian Borntraeger 		break;
176e1e2e605SNick Wang 	case KVM_CAP_NR_MEMSLOTS:
177e1e2e605SNick Wang 		r = KVM_USER_MEM_SLOTS;
178e1e2e605SNick Wang 		break;
1791526bf9cSChristian Borntraeger 	case KVM_CAP_S390_COW:
180abf09bedSMartin Schwidefsky 		r = MACHINE_HAS_ESOP;
1811526bf9cSChristian Borntraeger 		break;
1822bd0ac4eSCarsten Otte 	default:
183d7b0b5ebSCarsten Otte 		r = 0;
184b0c632dbSHeiko Carstens 	}
185d7b0b5ebSCarsten Otte 	return r;
1862bd0ac4eSCarsten Otte }
187b0c632dbSHeiko Carstens 
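/*
 * Walk all guest frames of a memory slot and transfer the per-page
 * dirty state recorded in the gmap (guest address space tables) into
 * KVM's dirty bitmap via mark_page_dirty().
 */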
18815f36ebdSJason J. Herne static void kvm_s390_sync_dirty_log(struct kvm *kvm,
18915f36ebdSJason J. Herne 					struct kvm_memory_slot *memslot)
19015f36ebdSJason J. Herne {
19115f36ebdSJason J. Herne 	gfn_t cur_gfn, last_gfn;
19215f36ebdSJason J. Herne 	unsigned long address;
19315f36ebdSJason J. Herne 	struct gmap *gmap = kvm->arch.gmap;
19415f36ebdSJason J. Herne 
19515f36ebdSJason J. Herne 	down_read(&gmap->mm->mmap_sem);
19615f36ebdSJason J. Herne 	/* Loop over all guest pages */
19715f36ebdSJason J. Herne 	last_gfn = memslot->base_gfn + memslot->npages;
19815f36ebdSJason J. Herne 	for (cur_gfn = memslot->base_gfn; cur_gfn < last_gfn; cur_gfn++) {
19915f36ebdSJason J. Herne 		address = gfn_to_hva_memslot(memslot, cur_gfn);
20015f36ebdSJason J. Herne 
20115f36ebdSJason J. Herne 		if (gmap_test_and_clear_dirty(address, gmap))
20215f36ebdSJason J. Herne 			mark_page_dirty(kvm, cur_gfn);
20315f36ebdSJason J. Herne 	}
20415f36ebdSJason J. Herne 	up_read(&gmap->mm->mmap_sem);
20515f36ebdSJason J. Herne }
20615f36ebdSJason J. Herne 
207b0c632dbSHeiko Carstens /* Section: vm related */
208b0c632dbSHeiko Carstens /*
209b0c632dbSHeiko Carstens  * Get (and clear) the dirty memory log for a memory slot.
210b0c632dbSHeiko Carstens  */
211b0c632dbSHeiko Carstens int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
212b0c632dbSHeiko Carstens 			       struct kvm_dirty_log *log)
213b0c632dbSHeiko Carstens {
21415f36ebdSJason J. Herne 	int r;
21515f36ebdSJason J. Herne 	unsigned long n;
21615f36ebdSJason J. Herne 	struct kvm_memory_slot *memslot;
21715f36ebdSJason J. Herne 	int is_dirty = 0;
21815f36ebdSJason J. Herne 
21915f36ebdSJason J. Herne 	mutex_lock(&kvm->slots_lock);
22015f36ebdSJason J. Herne 
22115f36ebdSJason J. Herne 	r = -EINVAL;
22215f36ebdSJason J. Herne 	if (log->slot >= KVM_USER_MEM_SLOTS)
22315f36ebdSJason J. Herne 		goto out;
22415f36ebdSJason J. Herne 
22515f36ebdSJason J. Herne 	memslot = id_to_memslot(kvm->memslots, log->slot);
22615f36ebdSJason J. Herne 	r = -ENOENT;
22715f36ebdSJason J. Herne 	if (!memslot->dirty_bitmap)
22815f36ebdSJason J. Herne 		goto out;
22915f36ebdSJason J. Herne 
23015f36ebdSJason J. Herne 	kvm_s390_sync_dirty_log(kvm, memslot);
23115f36ebdSJason J. Herne 	r = kvm_get_dirty_log(kvm, log, &is_dirty);
23215f36ebdSJason J. Herne 	if (r)
23315f36ebdSJason J. Herne 		goto out;
23415f36ebdSJason J. Herne 
23515f36ebdSJason J. Herne 	/* Clear the dirty log */
23615f36ebdSJason J. Herne 	if (is_dirty) {
23715f36ebdSJason J. Herne 		n = kvm_dirty_bitmap_bytes(memslot);
23815f36ebdSJason J. Herne 		memset(memslot->dirty_bitmap, 0, n);
23915f36ebdSJason J. Herne 	}
24015f36ebdSJason J. Herne 	r = 0;
24115f36ebdSJason J. Herne out:
24215f36ebdSJason J. Herne 	mutex_unlock(&kvm->slots_lock);
24315f36ebdSJason J. Herne 	return r;
244b0c632dbSHeiko Carstens }
245b0c632dbSHeiko Carstens 
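/*
 * KVM_ENABLE_CAP on the VM fd: at this point only the in-kernel
 * irqchip (FLIC) and user space SIGP handling can be enabled.
 */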
246d938dc55SCornelia Huck static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
247d938dc55SCornelia Huck {
248d938dc55SCornelia Huck 	int r;
249d938dc55SCornelia Huck 
250d938dc55SCornelia Huck 	if (cap->flags)
251d938dc55SCornelia Huck 		return -EINVAL;
252d938dc55SCornelia Huck 
253d938dc55SCornelia Huck 	switch (cap->cap) {
25484223598SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
25584223598SCornelia Huck 		kvm->arch.use_irqchip = 1;
25684223598SCornelia Huck 		r = 0;
25784223598SCornelia Huck 		break;
258*2444b352SDavid Hildenbrand 	case KVM_CAP_S390_USER_SIGP:
259*2444b352SDavid Hildenbrand 		kvm->arch.user_sigp = 1;
260*2444b352SDavid Hildenbrand 		r = 0;
261*2444b352SDavid Hildenbrand 		break;
262d938dc55SCornelia Huck 	default:
263d938dc55SCornelia Huck 		r = -EINVAL;
264d938dc55SCornelia Huck 		break;
265d938dc55SCornelia Huck 	}
266d938dc55SCornelia Huck 	return r;
267d938dc55SCornelia Huck }
268d938dc55SCornelia Huck 
2698c0a7ce6SDominik Dingel static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
2708c0a7ce6SDominik Dingel {
2718c0a7ce6SDominik Dingel 	int ret;
2728c0a7ce6SDominik Dingel 
2738c0a7ce6SDominik Dingel 	switch (attr->attr) {
2748c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE:
2758c0a7ce6SDominik Dingel 		ret = 0;
2768c0a7ce6SDominik Dingel 		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
2778c0a7ce6SDominik Dingel 			ret = -EFAULT;
2788c0a7ce6SDominik Dingel 		break;
2798c0a7ce6SDominik Dingel 	default:
2808c0a7ce6SDominik Dingel 		ret = -ENXIO;
2818c0a7ce6SDominik Dingel 		break;
2828c0a7ce6SDominik Dingel 	}
2838c0a7ce6SDominik Dingel 	return ret;
2848c0a7ce6SDominik Dingel }
2858c0a7ce6SDominik Dingel 
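/*
 * Setters for the KVM_S390_VM_MEM_CTRL attribute group: enable CMMA,
 * reset all CMMA state, or replace the gmap to apply a new guest
 * memory limit.  Enabling CMMA and changing the limit are only
 * permitted as long as no vcpus have been created.
 */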
2868c0a7ce6SDominik Dingel static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
2874f718eabSDominik Dingel {
2884f718eabSDominik Dingel 	int ret;
2894f718eabSDominik Dingel 	unsigned int idx;
2904f718eabSDominik Dingel 	switch (attr->attr) {
2914f718eabSDominik Dingel 	case KVM_S390_VM_MEM_ENABLE_CMMA:
2924f718eabSDominik Dingel 		ret = -EBUSY;
2934f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
2944f718eabSDominik Dingel 		if (atomic_read(&kvm->online_vcpus) == 0) {
2954f718eabSDominik Dingel 			kvm->arch.use_cmma = 1;
2964f718eabSDominik Dingel 			ret = 0;
2974f718eabSDominik Dingel 		}
2984f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
2994f718eabSDominik Dingel 		break;
3004f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CLR_CMMA:
3014f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
3024f718eabSDominik Dingel 		idx = srcu_read_lock(&kvm->srcu);
303a13cff31SDominik Dingel 		s390_reset_cmma(kvm->arch.gmap->mm);
3044f718eabSDominik Dingel 		srcu_read_unlock(&kvm->srcu, idx);
3054f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
3064f718eabSDominik Dingel 		ret = 0;
3074f718eabSDominik Dingel 		break;
3088c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE: {
3098c0a7ce6SDominik Dingel 		unsigned long new_limit;
3108c0a7ce6SDominik Dingel 
3118c0a7ce6SDominik Dingel 		if (kvm_is_ucontrol(kvm))
3128c0a7ce6SDominik Dingel 			return -EINVAL;
3138c0a7ce6SDominik Dingel 
3148c0a7ce6SDominik Dingel 		if (get_user(new_limit, (u64 __user *)attr->addr))
3158c0a7ce6SDominik Dingel 			return -EFAULT;
3168c0a7ce6SDominik Dingel 
3178c0a7ce6SDominik Dingel 		if (new_limit > kvm->arch.gmap->asce_end)
3188c0a7ce6SDominik Dingel 			return -E2BIG;
3198c0a7ce6SDominik Dingel 
3208c0a7ce6SDominik Dingel 		ret = -EBUSY;
3218c0a7ce6SDominik Dingel 		mutex_lock(&kvm->lock);
3228c0a7ce6SDominik Dingel 		if (atomic_read(&kvm->online_vcpus) == 0) {
3238c0a7ce6SDominik Dingel 			/* gmap_alloc will round the limit up */
3248c0a7ce6SDominik Dingel 			struct gmap *new = gmap_alloc(current->mm, new_limit);
3258c0a7ce6SDominik Dingel 
3268c0a7ce6SDominik Dingel 			if (!new) {
3278c0a7ce6SDominik Dingel 				ret = -ENOMEM;
3288c0a7ce6SDominik Dingel 			} else {
3298c0a7ce6SDominik Dingel 				gmap_free(kvm->arch.gmap);
3308c0a7ce6SDominik Dingel 				new->private = kvm;
3318c0a7ce6SDominik Dingel 				kvm->arch.gmap = new;
3328c0a7ce6SDominik Dingel 				ret = 0;
3338c0a7ce6SDominik Dingel 			}
3348c0a7ce6SDominik Dingel 		}
3358c0a7ce6SDominik Dingel 		mutex_unlock(&kvm->lock);
3368c0a7ce6SDominik Dingel 		break;
3378c0a7ce6SDominik Dingel 	}
3384f718eabSDominik Dingel 	default:
3394f718eabSDominik Dingel 		ret = -ENXIO;
3404f718eabSDominik Dingel 		break;
3414f718eabSDominik Dingel 	}
3424f718eabSDominik Dingel 	return ret;
3434f718eabSDominik Dingel }
3444f718eabSDominik Dingel 
345f2061656SDominik Dingel static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
346f2061656SDominik Dingel {
347f2061656SDominik Dingel 	int ret;
348f2061656SDominik Dingel 
349f2061656SDominik Dingel 	switch (attr->group) {
3504f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
3518c0a7ce6SDominik Dingel 		ret = kvm_s390_set_mem_control(kvm, attr);
3524f718eabSDominik Dingel 		break;
353f2061656SDominik Dingel 	default:
354f2061656SDominik Dingel 		ret = -ENXIO;
355f2061656SDominik Dingel 		break;
356f2061656SDominik Dingel 	}
357f2061656SDominik Dingel 
358f2061656SDominik Dingel 	return ret;
359f2061656SDominik Dingel }
360f2061656SDominik Dingel 
361f2061656SDominik Dingel static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
362f2061656SDominik Dingel {
3638c0a7ce6SDominik Dingel 	int ret;
3648c0a7ce6SDominik Dingel 
3658c0a7ce6SDominik Dingel 	switch (attr->group) {
3668c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
3678c0a7ce6SDominik Dingel 		ret = kvm_s390_get_mem_control(kvm, attr);
3688c0a7ce6SDominik Dingel 		break;
3698c0a7ce6SDominik Dingel 	default:
3708c0a7ce6SDominik Dingel 		ret = -ENXIO;
3718c0a7ce6SDominik Dingel 		break;
3728c0a7ce6SDominik Dingel 	}
3738c0a7ce6SDominik Dingel 
3748c0a7ce6SDominik Dingel 	return ret;
375f2061656SDominik Dingel }
376f2061656SDominik Dingel 
377f2061656SDominik Dingel static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
378f2061656SDominik Dingel {
379f2061656SDominik Dingel 	int ret;
380f2061656SDominik Dingel 
381f2061656SDominik Dingel 	switch (attr->group) {
3824f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
3834f718eabSDominik Dingel 		switch (attr->attr) {
3844f718eabSDominik Dingel 		case KVM_S390_VM_MEM_ENABLE_CMMA:
3854f718eabSDominik Dingel 		case KVM_S390_VM_MEM_CLR_CMMA:
3868c0a7ce6SDominik Dingel 		case KVM_S390_VM_MEM_LIMIT_SIZE:
3874f718eabSDominik Dingel 			ret = 0;
3884f718eabSDominik Dingel 			break;
3894f718eabSDominik Dingel 		default:
3904f718eabSDominik Dingel 			ret = -ENXIO;
3914f718eabSDominik Dingel 			break;
3924f718eabSDominik Dingel 		}
3934f718eabSDominik Dingel 		break;
394f2061656SDominik Dingel 	default:
395f2061656SDominik Dingel 		ret = -ENXIO;
396f2061656SDominik Dingel 		break;
397f2061656SDominik Dingel 	}
398f2061656SDominik Dingel 
399f2061656SDominik Dingel 	return ret;
400f2061656SDominik Dingel }
401f2061656SDominik Dingel 
402b0c632dbSHeiko Carstens long kvm_arch_vm_ioctl(struct file *filp,
403b0c632dbSHeiko Carstens 		       unsigned int ioctl, unsigned long arg)
404b0c632dbSHeiko Carstens {
405b0c632dbSHeiko Carstens 	struct kvm *kvm = filp->private_data;
406b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
407f2061656SDominik Dingel 	struct kvm_device_attr attr;
408b0c632dbSHeiko Carstens 	int r;
409b0c632dbSHeiko Carstens 
410b0c632dbSHeiko Carstens 	switch (ioctl) {
411ba5c1e9bSCarsten Otte 	case KVM_S390_INTERRUPT: {
412ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
413ba5c1e9bSCarsten Otte 
414ba5c1e9bSCarsten Otte 		r = -EFAULT;
415ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
416ba5c1e9bSCarsten Otte 			break;
417ba5c1e9bSCarsten Otte 		r = kvm_s390_inject_vm(kvm, &s390int);
418ba5c1e9bSCarsten Otte 		break;
419ba5c1e9bSCarsten Otte 	}
420d938dc55SCornelia Huck 	case KVM_ENABLE_CAP: {
421d938dc55SCornelia Huck 		struct kvm_enable_cap cap;
422d938dc55SCornelia Huck 		r = -EFAULT;
423d938dc55SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
424d938dc55SCornelia Huck 			break;
425d938dc55SCornelia Huck 		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
426d938dc55SCornelia Huck 		break;
427d938dc55SCornelia Huck 	}
42884223598SCornelia Huck 	case KVM_CREATE_IRQCHIP: {
42984223598SCornelia Huck 		struct kvm_irq_routing_entry routing;
43084223598SCornelia Huck 
43184223598SCornelia Huck 		r = -EINVAL;
43284223598SCornelia Huck 		if (kvm->arch.use_irqchip) {
43384223598SCornelia Huck 			/* Set up dummy routing. */
43484223598SCornelia Huck 			memset(&routing, 0, sizeof(routing));
43584223598SCornelia Huck 			kvm_set_irq_routing(kvm, &routing, 0, 0);
43684223598SCornelia Huck 			r = 0;
43784223598SCornelia Huck 		}
43884223598SCornelia Huck 		break;
43984223598SCornelia Huck 	}
440f2061656SDominik Dingel 	case KVM_SET_DEVICE_ATTR: {
441f2061656SDominik Dingel 		r = -EFAULT;
442f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
443f2061656SDominik Dingel 			break;
444f2061656SDominik Dingel 		r = kvm_s390_vm_set_attr(kvm, &attr);
445f2061656SDominik Dingel 		break;
446f2061656SDominik Dingel 	}
447f2061656SDominik Dingel 	case KVM_GET_DEVICE_ATTR: {
448f2061656SDominik Dingel 		r = -EFAULT;
449f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
450f2061656SDominik Dingel 			break;
451f2061656SDominik Dingel 		r = kvm_s390_vm_get_attr(kvm, &attr);
452f2061656SDominik Dingel 		break;
453f2061656SDominik Dingel 	}
454f2061656SDominik Dingel 	case KVM_HAS_DEVICE_ATTR: {
455f2061656SDominik Dingel 		r = -EFAULT;
456f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
457f2061656SDominik Dingel 			break;
458f2061656SDominik Dingel 		r = kvm_s390_vm_has_attr(kvm, &attr);
459f2061656SDominik Dingel 		break;
460f2061656SDominik Dingel 	}
461b0c632dbSHeiko Carstens 	default:
462367e1319SAvi Kivity 		r = -ENOTTY;
463b0c632dbSHeiko Carstens 	}
464b0c632dbSHeiko Carstens 
465b0c632dbSHeiko Carstens 	return r;
466b0c632dbSHeiko Carstens }
467b0c632dbSHeiko Carstens 
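/*
 * Allocate the crypto control block (CRYCB) for the VM if facility 76
 * is available and remember its address plus the format bits in
 * crycbd, which is later installed into each vcpu's SIE block.
 */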
4685102ee87STony Krowiak static int kvm_s390_crypto_init(struct kvm *kvm)
4695102ee87STony Krowiak {
4705102ee87STony Krowiak 	if (!test_vfacility(76))
4715102ee87STony Krowiak 		return 0;
4725102ee87STony Krowiak 
4735102ee87STony Krowiak 	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
4745102ee87STony Krowiak 					 GFP_KERNEL | GFP_DMA);
4755102ee87STony Krowiak 	if (!kvm->arch.crypto.crycb)
4765102ee87STony Krowiak 		return -ENOMEM;
4775102ee87STony Krowiak 
4785102ee87STony Krowiak 	kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
4795102ee87STony Krowiak 				  CRYCB_FORMAT1;
4805102ee87STony Krowiak 
4815102ee87STony Krowiak 	return 0;
4825102ee87STony Krowiak }
4835102ee87STony Krowiak 
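/*
 * Create the architecture specific parts of a VM: the SCA (system
 * control area), the s390 debug feature log, the crypto control block,
 * floating interrupt state and, unless this is a ucontrol VM, the gmap
 * backing the guest address space.
 */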
484e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
485b0c632dbSHeiko Carstens {
486b0c632dbSHeiko Carstens 	int rc;
487b0c632dbSHeiko Carstens 	char debug_name[16];
488f6c137ffSChristian Borntraeger 	static unsigned long sca_offset;
489b0c632dbSHeiko Carstens 
490e08b9637SCarsten Otte 	rc = -EINVAL;
491e08b9637SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
492e08b9637SCarsten Otte 	if (type & ~KVM_VM_S390_UCONTROL)
493e08b9637SCarsten Otte 		goto out_err;
494e08b9637SCarsten Otte 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
495e08b9637SCarsten Otte 		goto out_err;
496e08b9637SCarsten Otte #else
497e08b9637SCarsten Otte 	if (type)
498e08b9637SCarsten Otte 		goto out_err;
499e08b9637SCarsten Otte #endif
500e08b9637SCarsten Otte 
501b0c632dbSHeiko Carstens 	rc = s390_enable_sie();
502b0c632dbSHeiko Carstens 	if (rc)
503d89f5effSJan Kiszka 		goto out_err;
504b0c632dbSHeiko Carstens 
505b290411aSCarsten Otte 	rc = -ENOMEM;
506b290411aSCarsten Otte 
507b0c632dbSHeiko Carstens 	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
508b0c632dbSHeiko Carstens 	if (!kvm->arch.sca)
509d89f5effSJan Kiszka 		goto out_err;
510f6c137ffSChristian Borntraeger 	spin_lock(&kvm_lock);
511f6c137ffSChristian Borntraeger 	sca_offset = (sca_offset + 16) & 0x7f0;
512f6c137ffSChristian Borntraeger 	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
513f6c137ffSChristian Borntraeger 	spin_unlock(&kvm_lock);
514b0c632dbSHeiko Carstens 
515b0c632dbSHeiko Carstens 	sprintf(debug_name, "kvm-%u", current->pid);
516b0c632dbSHeiko Carstens 
517b0c632dbSHeiko Carstens 	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
518b0c632dbSHeiko Carstens 	if (!kvm->arch.dbf)
519b0c632dbSHeiko Carstens 		goto out_nodbf;
520b0c632dbSHeiko Carstens 
5215102ee87STony Krowiak 	if (kvm_s390_crypto_init(kvm) < 0)
5225102ee87STony Krowiak 		goto out_crypto;
5235102ee87STony Krowiak 
524ba5c1e9bSCarsten Otte 	spin_lock_init(&kvm->arch.float_int.lock);
525ba5c1e9bSCarsten Otte 	INIT_LIST_HEAD(&kvm->arch.float_int.list);
5268a242234SHeiko Carstens 	init_waitqueue_head(&kvm->arch.ipte_wq);
527a6b7e459SThomas Huth 	mutex_init(&kvm->arch.ipte_mutex);
528ba5c1e9bSCarsten Otte 
529b0c632dbSHeiko Carstens 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
530b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "%s", "vm created");
531b0c632dbSHeiko Carstens 
532e08b9637SCarsten Otte 	if (type & KVM_VM_S390_UCONTROL) {
533e08b9637SCarsten Otte 		kvm->arch.gmap = NULL;
534e08b9637SCarsten Otte 	} else {
5350349985aSChristian Borntraeger 		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
536598841caSCarsten Otte 		if (!kvm->arch.gmap)
537598841caSCarsten Otte 			goto out_nogmap;
5382c70fe44SChristian Borntraeger 		kvm->arch.gmap->private = kvm;
53924eb3a82SDominik Dingel 		kvm->arch.gmap->pfault_enabled = 0;
540e08b9637SCarsten Otte 	}
541fa6b7fe9SCornelia Huck 
542fa6b7fe9SCornelia Huck 	kvm->arch.css_support = 0;
54384223598SCornelia Huck 	kvm->arch.use_irqchip = 0;
544fa6b7fe9SCornelia Huck 
5458ad35755SDavid Hildenbrand 	spin_lock_init(&kvm->arch.start_stop_lock);
5468ad35755SDavid Hildenbrand 
547d89f5effSJan Kiszka 	return 0;
548598841caSCarsten Otte out_nogmap:
5495102ee87STony Krowiak 	kfree(kvm->arch.crypto.crycb);
5505102ee87STony Krowiak out_crypto:
551598841caSCarsten Otte 	debug_unregister(kvm->arch.dbf);
552b0c632dbSHeiko Carstens out_nodbf:
553b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
554d89f5effSJan Kiszka out_err:
555d89f5effSJan Kiszka 	return rc;
556b0c632dbSHeiko Carstens }
557b0c632dbSHeiko Carstens 
558d329c035SChristian Borntraeger void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
559d329c035SChristian Borntraeger {
560d329c035SChristian Borntraeger 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
561ade38c31SCornelia Huck 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
56267335e63SChristian Borntraeger 	kvm_s390_clear_local_irqs(vcpu);
5633c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
56458f9460bSCarsten Otte 	if (!kvm_is_ucontrol(vcpu->kvm)) {
56558f9460bSCarsten Otte 		clear_bit(63 - vcpu->vcpu_id,
56658f9460bSCarsten Otte 			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
567abf4a71eSCarsten Otte 		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
568abf4a71eSCarsten Otte 		    (__u64) vcpu->arch.sie_block)
569abf4a71eSCarsten Otte 			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
57058f9460bSCarsten Otte 	}
571abf4a71eSCarsten Otte 	smp_mb();
57227e0393fSCarsten Otte 
57327e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm))
57427e0393fSCarsten Otte 		gmap_free(vcpu->arch.gmap);
57527e0393fSCarsten Otte 
576b31605c1SDominik Dingel 	if (kvm_s390_cmma_enabled(vcpu->kvm))
577b31605c1SDominik Dingel 		kvm_s390_vcpu_unsetup_cmma(vcpu);
578d329c035SChristian Borntraeger 	free_page((unsigned long)(vcpu->arch.sie_block));
579b31288faSKonstantin Weitz 
5806692cef3SChristian Borntraeger 	kvm_vcpu_uninit(vcpu);
581b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
582d329c035SChristian Borntraeger }
583d329c035SChristian Borntraeger 
584d329c035SChristian Borntraeger static void kvm_free_vcpus(struct kvm *kvm)
585d329c035SChristian Borntraeger {
586d329c035SChristian Borntraeger 	unsigned int i;
587988a2caeSGleb Natapov 	struct kvm_vcpu *vcpu;
588d329c035SChristian Borntraeger 
589988a2caeSGleb Natapov 	kvm_for_each_vcpu(i, vcpu, kvm)
590988a2caeSGleb Natapov 		kvm_arch_vcpu_destroy(vcpu);
591988a2caeSGleb Natapov 
592988a2caeSGleb Natapov 	mutex_lock(&kvm->lock);
593988a2caeSGleb Natapov 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
594d329c035SChristian Borntraeger 		kvm->vcpus[i] = NULL;
595988a2caeSGleb Natapov 
596988a2caeSGleb Natapov 	atomic_set(&kvm->online_vcpus, 0);
597988a2caeSGleb Natapov 	mutex_unlock(&kvm->lock);
598d329c035SChristian Borntraeger }
599d329c035SChristian Borntraeger 
600b0c632dbSHeiko Carstens void kvm_arch_destroy_vm(struct kvm *kvm)
601b0c632dbSHeiko Carstens {
602d329c035SChristian Borntraeger 	kvm_free_vcpus(kvm);
603b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
604d329c035SChristian Borntraeger 	debug_unregister(kvm->arch.dbf);
6055102ee87STony Krowiak 	kfree(kvm->arch.crypto.crycb);
60627e0393fSCarsten Otte 	if (!kvm_is_ucontrol(kvm))
607598841caSCarsten Otte 		gmap_free(kvm->arch.gmap);
608841b91c5SCornelia Huck 	kvm_s390_destroy_adapters(kvm);
60967335e63SChristian Borntraeger 	kvm_s390_clear_float_irqs(kvm);
610b0c632dbSHeiko Carstens }
611b0c632dbSHeiko Carstens 
612b0c632dbSHeiko Carstens /* Section: vcpu related */
613dafd032aSDominik Dingel static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
614b0c632dbSHeiko Carstens {
615c6c956b8SMartin Schwidefsky 	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
61627e0393fSCarsten Otte 	if (!vcpu->arch.gmap)
61727e0393fSCarsten Otte 		return -ENOMEM;
6182c70fe44SChristian Borntraeger 	vcpu->arch.gmap->private = vcpu->kvm;
619dafd032aSDominik Dingel 
62027e0393fSCarsten Otte 	return 0;
62127e0393fSCarsten Otte }
62227e0393fSCarsten Otte 
623dafd032aSDominik Dingel int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
624dafd032aSDominik Dingel {
625dafd032aSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
626dafd032aSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
62759674c1aSChristian Borntraeger 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
62859674c1aSChristian Borntraeger 				    KVM_SYNC_GPRS |
6299eed0735SChristian Borntraeger 				    KVM_SYNC_ACRS |
630b028ee3eSDavid Hildenbrand 				    KVM_SYNC_CRS |
631b028ee3eSDavid Hildenbrand 				    KVM_SYNC_ARCH0 |
632b028ee3eSDavid Hildenbrand 				    KVM_SYNC_PFAULT;
633dafd032aSDominik Dingel 
634dafd032aSDominik Dingel 	if (kvm_is_ucontrol(vcpu->kvm))
635dafd032aSDominik Dingel 		return __kvm_ucontrol_vcpu_init(vcpu);
636dafd032aSDominik Dingel 
637b0c632dbSHeiko Carstens 	return 0;
638b0c632dbSHeiko Carstens }
639b0c632dbSHeiko Carstens 
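/*
 * vcpu_load/vcpu_put: swap FPU control/registers and access registers
 * between host and guest and enable/disable the vcpu's gmap, marking
 * the vcpu as running in the SIE cpuflags.
 */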
640b0c632dbSHeiko Carstens void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
641b0c632dbSHeiko Carstens {
6424725c860SMartin Schwidefsky 	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
6434725c860SMartin Schwidefsky 	save_fp_regs(vcpu->arch.host_fpregs.fprs);
644b0c632dbSHeiko Carstens 	save_access_regs(vcpu->arch.host_acrs);
6454725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
6464725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
64759674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
648480e5926SChristian Borntraeger 	gmap_enable(vcpu->arch.gmap);
6499e6dabefSCornelia Huck 	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
650b0c632dbSHeiko Carstens }
651b0c632dbSHeiko Carstens 
652b0c632dbSHeiko Carstens void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
653b0c632dbSHeiko Carstens {
6549e6dabefSCornelia Huck 	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
655480e5926SChristian Borntraeger 	gmap_disable(vcpu->arch.gmap);
6564725c860SMartin Schwidefsky 	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
6574725c860SMartin Schwidefsky 	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
65859674c1aSChristian Borntraeger 	save_access_regs(vcpu->run->s.regs.acrs);
6594725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
6604725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
661b0c632dbSHeiko Carstens 	restore_access_regs(vcpu->arch.host_acrs);
662b0c632dbSHeiko Carstens }
663b0c632dbSHeiko Carstens 
664b0c632dbSHeiko Carstens static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
665b0c632dbSHeiko Carstens {
666b0c632dbSHeiko Carstens 	/* this equals the initial CPU reset in the POP (Principles of Operation), but we don't switch to ESA */
667b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.mask = 0UL;
668b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.addr = 0UL;
6698d26cf7bSChristian Borntraeger 	kvm_s390_set_prefix(vcpu, 0);
670b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->cputm     = 0UL;
671b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->ckc       = 0UL;
672b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->todpr     = 0;
673b0c632dbSHeiko Carstens 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
674b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
675b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
676b0c632dbSHeiko Carstens 	vcpu->arch.guest_fpregs.fpc = 0;
677b0c632dbSHeiko Carstens 	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
678b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gbea = 1;
679672550fbSChristian Borntraeger 	vcpu->arch.sie_block->pp = 0;
6803c038e6bSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
6813c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
6826352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
6836852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
6842ed10cc1SJens Freimann 	kvm_s390_clear_local_irqs(vcpu);
685b0c632dbSHeiko Carstens }
686b0c632dbSHeiko Carstens 
68731928aa5SDominik Dingel void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
68842897d86SMarcelo Tosatti {
689dafd032aSDominik Dingel 	if (!kvm_is_ucontrol(vcpu->kvm))
690dafd032aSDominik Dingel 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
69142897d86SMarcelo Tosatti }
69242897d86SMarcelo Tosatti 
6935102ee87STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
6945102ee87STony Krowiak {
6955102ee87STony Krowiak 	if (!test_vfacility(76))
6965102ee87STony Krowiak 		return;
6975102ee87STony Krowiak 
6985102ee87STony Krowiak 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
6995102ee87STony Krowiak }
7005102ee87STony Krowiak 
701b31605c1SDominik Dingel void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
702b31605c1SDominik Dingel {
703b31605c1SDominik Dingel 	free_page(vcpu->arch.sie_block->cbrlo);
704b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = 0;
705b31605c1SDominik Dingel }
706b31605c1SDominik Dingel 
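/*
 * Allocate the CBRL origin page used for collaborative memory
 * management (CMMA) and adjust the ecb2 interpretation bits
 * accordingly.
 */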
707b31605c1SDominik Dingel int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
708b31605c1SDominik Dingel {
709b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
710b31605c1SDominik Dingel 	if (!vcpu->arch.sie_block->cbrlo)
711b31605c1SDominik Dingel 		return -ENOMEM;
712b31605c1SDominik Dingel 
713b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 |= 0x80;
714b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 &= ~0x08;
715b31605c1SDominik Dingel 	return 0;
716b31605c1SDominik Dingel }
717b31605c1SDominik Dingel 
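/*
 * One-time setup of the SIE control block for a new vcpu: initial
 * cpuflags, the ecb/ecb2/eca execution controls (depending on the
 * available facilities and sclp features), interception controls, the
 * facility list pointer, optional CMMA buffer, the clock comparator
 * timer and the crypto control block pointer.
 */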
718b0c632dbSHeiko Carstens int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
719b0c632dbSHeiko Carstens {
720b31605c1SDominik Dingel 	int rc = 0;
721b31288faSKonstantin Weitz 
7229e6dabefSCornelia Huck 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
7239e6dabefSCornelia Huck 						    CPUSTAT_SM |
72469d0d3a3SChristian Borntraeger 						    CPUSTAT_STOPPED |
72569d0d3a3SChristian Borntraeger 						    CPUSTAT_GED);
726fc34531dSChristian Borntraeger 	vcpu->arch.sie_block->ecb   = 6;
7277feb6bb8SMichael Mueller 	if (test_vfacility(50) && test_vfacility(73))
7287feb6bb8SMichael Mueller 		vcpu->arch.sie_block->ecb |= 0x10;
7297feb6bb8SMichael Mueller 
73069d0d3a3SChristian Borntraeger 	vcpu->arch.sie_block->ecb2  = 8;
731ea5f4969SDavid Hildenbrand 	vcpu->arch.sie_block->eca   = 0xC1002000U;
732217a4406SHeiko Carstens 	if (sclp_has_siif())
733217a4406SHeiko Carstens 		vcpu->arch.sie_block->eca |= 1;
734ea5f4969SDavid Hildenbrand 	if (sclp_has_sigpif())
735ea5f4969SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= 0x10000000U;
73678c4b59fSMichael Mueller 	vcpu->arch.sie_block->fac   = (int) (long) vfacilities;
7375a5e6536SMatthew Rosato 	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
7385a5e6536SMatthew Rosato 				      ICTL_TPROT;
7395a5e6536SMatthew Rosato 
740b31605c1SDominik Dingel 	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
741b31605c1SDominik Dingel 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
742b31605c1SDominik Dingel 		if (rc)
743b31605c1SDominik Dingel 			return rc;
744b31288faSKonstantin Weitz 	}
7450ac96cafSDavid Hildenbrand 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
746ca872302SChristian Borntraeger 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
747453423dcSChristian Borntraeger 	get_cpu_id(&vcpu->arch.cpu_id);
74892e6ecf3SChristian Borntraeger 	vcpu->arch.cpu_id.version = 0xff;
7495102ee87STony Krowiak 
7505102ee87STony Krowiak 	kvm_s390_vcpu_crypto_setup(vcpu);
7515102ee87STony Krowiak 
752b31605c1SDominik Dingel 	return rc;
753b0c632dbSHeiko Carstens }
754b0c632dbSHeiko Carstens 
755b0c632dbSHeiko Carstens struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
756b0c632dbSHeiko Carstens 				      unsigned int id)
757b0c632dbSHeiko Carstens {
7584d47555aSCarsten Otte 	struct kvm_vcpu *vcpu;
7597feb6bb8SMichael Mueller 	struct sie_page *sie_page;
7604d47555aSCarsten Otte 	int rc = -EINVAL;
761b0c632dbSHeiko Carstens 
7624d47555aSCarsten Otte 	if (id >= KVM_MAX_VCPUS)
7634d47555aSCarsten Otte 		goto out;
7644d47555aSCarsten Otte 
7654d47555aSCarsten Otte 	rc = -ENOMEM;
7664d47555aSCarsten Otte 
767b110feafSMichael Mueller 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
768b0c632dbSHeiko Carstens 	if (!vcpu)
7694d47555aSCarsten Otte 		goto out;
770b0c632dbSHeiko Carstens 
7717feb6bb8SMichael Mueller 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
7727feb6bb8SMichael Mueller 	if (!sie_page)
773b0c632dbSHeiko Carstens 		goto out_free_cpu;
774b0c632dbSHeiko Carstens 
7757feb6bb8SMichael Mueller 	vcpu->arch.sie_block = &sie_page->sie_block;
7767feb6bb8SMichael Mueller 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
7777feb6bb8SMichael Mueller 
778b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icpua = id;
77958f9460bSCarsten Otte 	if (!kvm_is_ucontrol(kvm)) {
78058f9460bSCarsten Otte 		if (!kvm->arch.sca) {
78158f9460bSCarsten Otte 			WARN_ON_ONCE(1);
78258f9460bSCarsten Otte 			goto out_free_cpu;
78358f9460bSCarsten Otte 		}
784abf4a71eSCarsten Otte 		if (!kvm->arch.sca->cpu[id].sda)
78558f9460bSCarsten Otte 			kvm->arch.sca->cpu[id].sda =
78658f9460bSCarsten Otte 				(__u64) vcpu->arch.sie_block;
78758f9460bSCarsten Otte 		vcpu->arch.sie_block->scaoh =
78858f9460bSCarsten Otte 			(__u32)(((__u64)kvm->arch.sca) >> 32);
789b0c632dbSHeiko Carstens 		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
790fc34531dSChristian Borntraeger 		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
79158f9460bSCarsten Otte 	}
792b0c632dbSHeiko Carstens 
793ba5c1e9bSCarsten Otte 	spin_lock_init(&vcpu->arch.local_int.lock);
794ba5c1e9bSCarsten Otte 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
795d0321a24SChristian Borntraeger 	vcpu->arch.local_int.wq = &vcpu->wq;
7965288fbf0SChristian Borntraeger 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
797ba5c1e9bSCarsten Otte 
798b0c632dbSHeiko Carstens 	rc = kvm_vcpu_init(vcpu, kvm, id);
799b0c632dbSHeiko Carstens 	if (rc)
8007b06bf2fSWei Yongjun 		goto out_free_sie_block;
801b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
802b0c632dbSHeiko Carstens 		 vcpu->arch.sie_block);
803ade38c31SCornelia Huck 	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
804b0c632dbSHeiko Carstens 
805b0c632dbSHeiko Carstens 	return vcpu;
8067b06bf2fSWei Yongjun out_free_sie_block:
8077b06bf2fSWei Yongjun 	free_page((unsigned long)(vcpu->arch.sie_block));
808b0c632dbSHeiko Carstens out_free_cpu:
809b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
8104d47555aSCarsten Otte out:
811b0c632dbSHeiko Carstens 	return ERR_PTR(rc);
812b0c632dbSHeiko Carstens }
813b0c632dbSHeiko Carstens 
814b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
815b0c632dbSHeiko Carstens {
8169a022067SDavid Hildenbrand 	return kvm_s390_vcpu_has_irq(vcpu, 0);
817b0c632dbSHeiko Carstens }
818b0c632dbSHeiko Carstens 
81949b99e1eSChristian Borntraeger void s390_vcpu_block(struct kvm_vcpu *vcpu)
82049b99e1eSChristian Borntraeger {
82149b99e1eSChristian Borntraeger 	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
82249b99e1eSChristian Borntraeger }
82349b99e1eSChristian Borntraeger 
82449b99e1eSChristian Borntraeger void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
82549b99e1eSChristian Borntraeger {
82649b99e1eSChristian Borntraeger 	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
82749b99e1eSChristian Borntraeger }
82849b99e1eSChristian Borntraeger 
82949b99e1eSChristian Borntraeger /*
83049b99e1eSChristian Borntraeger  * Kick a guest cpu out of SIE and wait until SIE is not running.
83149b99e1eSChristian Borntraeger  * If the CPU is not running (e.g. waiting as idle) the function will
83249b99e1eSChristian Borntraeger  * return immediately. */
83349b99e1eSChristian Borntraeger void exit_sie(struct kvm_vcpu *vcpu)
83449b99e1eSChristian Borntraeger {
83549b99e1eSChristian Borntraeger 	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
83649b99e1eSChristian Borntraeger 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
83749b99e1eSChristian Borntraeger 		cpu_relax();
83849b99e1eSChristian Borntraeger }
83949b99e1eSChristian Borntraeger 
84049b99e1eSChristian Borntraeger /* Kick a guest cpu out of SIE and prevent SIE-reentry */
84149b99e1eSChristian Borntraeger void exit_sie_sync(struct kvm_vcpu *vcpu)
84249b99e1eSChristian Borntraeger {
84349b99e1eSChristian Borntraeger 	s390_vcpu_block(vcpu);
84449b99e1eSChristian Borntraeger 	exit_sie(vcpu);
84549b99e1eSChristian Borntraeger }
84649b99e1eSChristian Borntraeger 
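/*
 * Called by the gmap code when a guest mapping is invalidated.  If the
 * invalidated address belongs to a vcpu's (two page) prefix area, post
 * KVM_REQ_MMU_RELOAD so the prefix gets remapped and kick that vcpu
 * out of SIE.
 */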
8472c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
8482c70fe44SChristian Borntraeger {
8492c70fe44SChristian Borntraeger 	int i;
8502c70fe44SChristian Borntraeger 	struct kvm *kvm = gmap->private;
8512c70fe44SChristian Borntraeger 	struct kvm_vcpu *vcpu;
8522c70fe44SChristian Borntraeger 
8532c70fe44SChristian Borntraeger 	kvm_for_each_vcpu(i, vcpu, kvm) {
8542c70fe44SChristian Borntraeger 		/* match against both prefix pages */
855fda902cbSMichael Mueller 		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
8562c70fe44SChristian Borntraeger 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
8572c70fe44SChristian Borntraeger 			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
8582c70fe44SChristian Borntraeger 			exit_sie_sync(vcpu);
8592c70fe44SChristian Borntraeger 		}
8602c70fe44SChristian Borntraeger 	}
8612c70fe44SChristian Borntraeger }
8622c70fe44SChristian Borntraeger 
863b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
864b6d33834SChristoffer Dall {
865b6d33834SChristoffer Dall 	/* kvm common code refers to this, but never calls it */
866b6d33834SChristoffer Dall 	BUG();
867b6d33834SChristoffer Dall 	return 0;
868b6d33834SChristoffer Dall }
869b6d33834SChristoffer Dall 
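/*
 * KVM_GET_ONE_REG / KVM_SET_ONE_REG handlers for the s390 specific
 * registers: TOD programmable register, epoch difference, cpu timer,
 * clock comparator, the pfault parameters, the program parameter and
 * the breaking-event address (gbea).
 */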
87014eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
87114eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
87214eebd91SCarsten Otte {
87314eebd91SCarsten Otte 	int r = -EINVAL;
87414eebd91SCarsten Otte 
87514eebd91SCarsten Otte 	switch (reg->id) {
87629b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
87729b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->todpr,
87829b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
87929b7c71bSCarsten Otte 		break;
88029b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
88129b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->epoch,
88229b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
88329b7c71bSCarsten Otte 		break;
88446a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
88546a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->cputm,
88646a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
88746a6dd1cSJason J. herne 		break;
88846a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
88946a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->ckc,
89046a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
89146a6dd1cSJason J. herne 		break;
892536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
893536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_token,
894536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
895536336c2SDominik Dingel 		break;
896536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
897536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_compare,
898536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
899536336c2SDominik Dingel 		break;
900536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
901536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_select,
902536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
903536336c2SDominik Dingel 		break;
904672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
905672550fbSChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->pp,
906672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
907672550fbSChristian Borntraeger 		break;
908afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
909afa45ff5SChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->gbea,
910afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
911afa45ff5SChristian Borntraeger 		break;
91214eebd91SCarsten Otte 	default:
91314eebd91SCarsten Otte 		break;
91414eebd91SCarsten Otte 	}
91514eebd91SCarsten Otte 
91614eebd91SCarsten Otte 	return r;
91714eebd91SCarsten Otte }
91814eebd91SCarsten Otte 
91914eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
92014eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
92114eebd91SCarsten Otte {
92214eebd91SCarsten Otte 	int r = -EINVAL;
92314eebd91SCarsten Otte 
92414eebd91SCarsten Otte 	switch (reg->id) {
92529b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
92629b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->todpr,
92729b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
92829b7c71bSCarsten Otte 		break;
92929b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
93029b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->epoch,
93129b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
93229b7c71bSCarsten Otte 		break;
93346a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
93446a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->cputm,
93546a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
93646a6dd1cSJason J. herne 		break;
93746a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
93846a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->ckc,
93946a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
94046a6dd1cSJason J. herne 		break;
941536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
942536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_token,
943536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
9449fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
9459fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
946536336c2SDominik Dingel 		break;
947536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
948536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_compare,
949536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
950536336c2SDominik Dingel 		break;
951536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
952536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_select,
953536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
954536336c2SDominik Dingel 		break;
955672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
956672550fbSChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->pp,
957672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
958672550fbSChristian Borntraeger 		break;
959afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
960afa45ff5SChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->gbea,
961afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
962afa45ff5SChristian Borntraeger 		break;
96314eebd91SCarsten Otte 	default:
96414eebd91SCarsten Otte 		break;
96514eebd91SCarsten Otte 	}
96614eebd91SCarsten Otte 
96714eebd91SCarsten Otte 	return r;
96814eebd91SCarsten Otte }
969b6d33834SChristoffer Dall 
970b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
971b0c632dbSHeiko Carstens {
972b0c632dbSHeiko Carstens 	kvm_s390_vcpu_initial_reset(vcpu);
973b0c632dbSHeiko Carstens 	return 0;
974b0c632dbSHeiko Carstens }
975b0c632dbSHeiko Carstens 
976b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
977b0c632dbSHeiko Carstens {
9785a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
979b0c632dbSHeiko Carstens 	return 0;
980b0c632dbSHeiko Carstens }
981b0c632dbSHeiko Carstens 
982b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
983b0c632dbSHeiko Carstens {
9845a32c1afSChristian Borntraeger 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
985b0c632dbSHeiko Carstens 	return 0;
986b0c632dbSHeiko Carstens }
987b0c632dbSHeiko Carstens 
988b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
989b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
990b0c632dbSHeiko Carstens {
99159674c1aSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
992b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
99359674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
994b0c632dbSHeiko Carstens 	return 0;
995b0c632dbSHeiko Carstens }
996b0c632dbSHeiko Carstens 
997b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
998b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
999b0c632dbSHeiko Carstens {
100059674c1aSChristian Borntraeger 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
1001b0c632dbSHeiko Carstens 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
1002b0c632dbSHeiko Carstens 	return 0;
1003b0c632dbSHeiko Carstens }
1004b0c632dbSHeiko Carstens 
1005b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1006b0c632dbSHeiko Carstens {
10074725c860SMartin Schwidefsky 	if (test_fp_ctl(fpu->fpc))
10084725c860SMartin Schwidefsky 		return -EINVAL;
1009b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
10104725c860SMartin Schwidefsky 	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
10114725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
10124725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
1013b0c632dbSHeiko Carstens 	return 0;
1014b0c632dbSHeiko Carstens }
1015b0c632dbSHeiko Carstens 
1016b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1017b0c632dbSHeiko Carstens {
1018b0c632dbSHeiko Carstens 	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
1019b0c632dbSHeiko Carstens 	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
1020b0c632dbSHeiko Carstens 	return 0;
1021b0c632dbSHeiko Carstens }
1022b0c632dbSHeiko Carstens 
1023b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1024b0c632dbSHeiko Carstens {
1025b0c632dbSHeiko Carstens 	int rc = 0;
1026b0c632dbSHeiko Carstens 
10277a42fdc2SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
1028b0c632dbSHeiko Carstens 		rc = -EBUSY;
1029d7b0b5ebSCarsten Otte 	else {
1030d7b0b5ebSCarsten Otte 		vcpu->run->psw_mask = psw.mask;
1031d7b0b5ebSCarsten Otte 		vcpu->run->psw_addr = psw.addr;
1032d7b0b5ebSCarsten Otte 	}
1033b0c632dbSHeiko Carstens 	return rc;
1034b0c632dbSHeiko Carstens }
1035b0c632dbSHeiko Carstens 
1036b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1037b0c632dbSHeiko Carstens 				  struct kvm_translation *tr)
1038b0c632dbSHeiko Carstens {
1039b0c632dbSHeiko Carstens 	return -EINVAL; /* not implemented yet */
1040b0c632dbSHeiko Carstens }
1041b0c632dbSHeiko Carstens 
104227291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
104327291e21SDavid Hildenbrand 			      KVM_GUESTDBG_USE_HW_BP | \
104427291e21SDavid Hildenbrand 			      KVM_GUESTDBG_ENABLE)
104527291e21SDavid Hildenbrand 
1046d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1047d0bfb940SJan Kiszka 					struct kvm_guest_debug *dbg)
1048b0c632dbSHeiko Carstens {
104927291e21SDavid Hildenbrand 	int rc = 0;
105027291e21SDavid Hildenbrand 
105127291e21SDavid Hildenbrand 	vcpu->guest_debug = 0;
105227291e21SDavid Hildenbrand 	kvm_s390_clear_bp_data(vcpu);
105327291e21SDavid Hildenbrand 
10542de3bfc2SDavid Hildenbrand 	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
105527291e21SDavid Hildenbrand 		return -EINVAL;
105627291e21SDavid Hildenbrand 
105727291e21SDavid Hildenbrand 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
105827291e21SDavid Hildenbrand 		vcpu->guest_debug = dbg->control;
105927291e21SDavid Hildenbrand 		/* enforce guest PER */
106027291e21SDavid Hildenbrand 		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
106127291e21SDavid Hildenbrand 
106227291e21SDavid Hildenbrand 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
106327291e21SDavid Hildenbrand 			rc = kvm_s390_import_bp_data(vcpu, dbg);
106427291e21SDavid Hildenbrand 	} else {
106527291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
106627291e21SDavid Hildenbrand 		vcpu->arch.guestdbg.last_bp = 0;
106727291e21SDavid Hildenbrand 	}
106827291e21SDavid Hildenbrand 
106927291e21SDavid Hildenbrand 	if (rc) {
107027291e21SDavid Hildenbrand 		vcpu->guest_debug = 0;
107127291e21SDavid Hildenbrand 		kvm_s390_clear_bp_data(vcpu);
107227291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
107327291e21SDavid Hildenbrand 	}
107427291e21SDavid Hildenbrand 
107527291e21SDavid Hildenbrand 	return rc;
1076b0c632dbSHeiko Carstens }
1077b0c632dbSHeiko Carstens 
107862d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
107962d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
108062d9f0dbSMarcelo Tosatti {
10816352e4d2SDavid Hildenbrand 	/* CHECK_STOP and LOAD are not supported yet */
10826352e4d2SDavid Hildenbrand 	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
10836352e4d2SDavid Hildenbrand 				       KVM_MP_STATE_OPERATING;
108462d9f0dbSMarcelo Tosatti }
108562d9f0dbSMarcelo Tosatti 
108662d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
108762d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
108862d9f0dbSMarcelo Tosatti {
10896352e4d2SDavid Hildenbrand 	int rc = 0;
10906352e4d2SDavid Hildenbrand 
10916352e4d2SDavid Hildenbrand 	/* user space knows about this interface - let it control the state */
10926352e4d2SDavid Hildenbrand 	vcpu->kvm->arch.user_cpu_state_ctrl = 1;
10936352e4d2SDavid Hildenbrand 
10946352e4d2SDavid Hildenbrand 	switch (mp_state->mp_state) {
10956352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_STOPPED:
10966352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
10976352e4d2SDavid Hildenbrand 		break;
10986352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_OPERATING:
10996352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
11006352e4d2SDavid Hildenbrand 		break;
11016352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_LOAD:
11026352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_CHECK_STOP:
11036352e4d2SDavid Hildenbrand 		/* fall through - CHECK_STOP and LOAD are not supported yet */
11046352e4d2SDavid Hildenbrand 	default:
11056352e4d2SDavid Hildenbrand 		rc = -ENXIO;
11066352e4d2SDavid Hildenbrand 	}
11076352e4d2SDavid Hildenbrand 
11086352e4d2SDavid Hildenbrand 	return rc;
110962d9f0dbSMarcelo Tosatti }
111062d9f0dbSMarcelo Tosatti 
1111b31605c1SDominik Dingel bool kvm_s390_cmma_enabled(struct kvm *kvm)
1112b31605c1SDominik Dingel {
1113b31605c1SDominik Dingel 	if (!MACHINE_IS_LPAR)
1114b31605c1SDominik Dingel 		return false;
1115b31605c1SDominik Dingel 	/* only enable for z10 and later */
1116b31605c1SDominik Dingel 	if (!MACHINE_HAS_EDAT1)
1117b31605c1SDominik Dingel 		return false;
1118b31605c1SDominik Dingel 	if (!kvm->arch.use_cmma)
1119b31605c1SDominik Dingel 		return false;
1120b31605c1SDominik Dingel 	return true;
1121b31605c1SDominik Dingel }
1122b31605c1SDominik Dingel 
11238ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu)
11248ad35755SDavid Hildenbrand {
11258ad35755SDavid Hildenbrand 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
11268ad35755SDavid Hildenbrand }
11278ad35755SDavid Hildenbrand 
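/*
 * Editor-added summary comment (not part of the original blame data):
 * Handle vcpu requests queued via kvm_make_request() before the next guest
 * entry: re-arm the ipte notifier for the guest prefix page (MMU_RELOAD),
 * flush the guest TLB, and enable or disable the IBS facility as requested;
 * a pending UNHALT request is simply cleared.
 */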
11282c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
11292c70fe44SChristian Borntraeger {
11308ad35755SDavid Hildenbrand retry:
11318ad35755SDavid Hildenbrand 	s390_vcpu_unblock(vcpu);
11322c70fe44SChristian Borntraeger 	/*
11332c70fe44SChristian Borntraeger 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
11342c70fe44SChristian Borntraeger 	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
11352c70fe44SChristian Borntraeger 	 * This ensures that the ipte instruction for this request has
11362c70fe44SChristian Borntraeger 	 * already finished. We might race against a second unmapper that
11372c70fe44SChristian Borntraeger 	 * wants to set the blocking bit. Let's just retry the request loop.
11382c70fe44SChristian Borntraeger 	 */
11398ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
11402c70fe44SChristian Borntraeger 		int rc;
11412c70fe44SChristian Borntraeger 		rc = gmap_ipte_notify(vcpu->arch.gmap,
1142fda902cbSMichael Mueller 				      kvm_s390_get_prefix(vcpu),
11432c70fe44SChristian Borntraeger 				      PAGE_SIZE * 2);
11442c70fe44SChristian Borntraeger 		if (rc)
11452c70fe44SChristian Borntraeger 			return rc;
11468ad35755SDavid Hildenbrand 		goto retry;
11472c70fe44SChristian Borntraeger 	}
11488ad35755SDavid Hildenbrand 
1149d3d692c8SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
1150d3d692c8SDavid Hildenbrand 		vcpu->arch.sie_block->ihcpu = 0xffff;
1151d3d692c8SDavid Hildenbrand 		goto retry;
1152d3d692c8SDavid Hildenbrand 	}
1153d3d692c8SDavid Hildenbrand 
11548ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
11558ad35755SDavid Hildenbrand 		if (!ibs_enabled(vcpu)) {
11568ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
11578ad35755SDavid Hildenbrand 			atomic_set_mask(CPUSTAT_IBS,
11588ad35755SDavid Hildenbrand 					&vcpu->arch.sie_block->cpuflags);
11598ad35755SDavid Hildenbrand 		}
11608ad35755SDavid Hildenbrand 		goto retry;
11618ad35755SDavid Hildenbrand 	}
11628ad35755SDavid Hildenbrand 
11638ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
11648ad35755SDavid Hildenbrand 		if (ibs_enabled(vcpu)) {
11658ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
11668ad35755SDavid Hildenbrand 			atomic_clear_mask(CPUSTAT_IBS,
11678ad35755SDavid Hildenbrand 					  &vcpu->arch.sie_block->cpuflags);
11688ad35755SDavid Hildenbrand 		}
11698ad35755SDavid Hildenbrand 		goto retry;
11708ad35755SDavid Hildenbrand 	}
11718ad35755SDavid Hildenbrand 
11720759d068SDavid Hildenbrand 	/* nothing to do, just clear the request */
11730759d068SDavid Hildenbrand 	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
11740759d068SDavid Hildenbrand 
11752c70fe44SChristian Borntraeger 	return 0;
11762c70fe44SChristian Borntraeger }
11772c70fe44SChristian Borntraeger 
1178fa576c58SThomas Huth /**
1179fa576c58SThomas Huth  * kvm_arch_fault_in_page - fault-in guest page if necessary
1180fa576c58SThomas Huth  * @vcpu: The corresponding virtual cpu
1181fa576c58SThomas Huth  * @gpa: Guest physical address
1182fa576c58SThomas Huth  * @writable: Whether the page should be writable or not
1183fa576c58SThomas Huth  *
1184fa576c58SThomas Huth  * Make sure that a guest page has been faulted-in on the host.
1185fa576c58SThomas Huth  *
1186fa576c58SThomas Huth  * Return: Zero on success, negative error code otherwise.
1187fa576c58SThomas Huth  */
1188fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
118924eb3a82SDominik Dingel {
1190527e30b4SMartin Schwidefsky 	return gmap_fault(vcpu->arch.gmap, gpa,
1191527e30b4SMartin Schwidefsky 			  writable ? FAULT_FLAG_WRITE : 0);
119224eb3a82SDominik Dingel }
119324eb3a82SDominik Dingel 
11943c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
11953c038e6bSDominik Dingel 				      unsigned long token)
11963c038e6bSDominik Dingel {
11973c038e6bSDominik Dingel 	struct kvm_s390_interrupt inti;
1198383d0b05SJens Freimann 	struct kvm_s390_irq irq;
11993c038e6bSDominik Dingel 
12003c038e6bSDominik Dingel 	if (start_token) {
1201383d0b05SJens Freimann 		irq.u.ext.ext_params2 = token;
1202383d0b05SJens Freimann 		irq.type = KVM_S390_INT_PFAULT_INIT;
1203383d0b05SJens Freimann 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
12043c038e6bSDominik Dingel 	} else {
12053c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_DONE;
1206383d0b05SJens Freimann 		inti.parm64 = token;
12073c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
12083c038e6bSDominik Dingel 	}
12093c038e6bSDominik Dingel }
12103c038e6bSDominik Dingel 
12113c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
12123c038e6bSDominik Dingel 				     struct kvm_async_pf *work)
12133c038e6bSDominik Dingel {
12143c038e6bSDominik Dingel 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
12153c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
12163c038e6bSDominik Dingel }
12173c038e6bSDominik Dingel 
12183c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
12193c038e6bSDominik Dingel 				 struct kvm_async_pf *work)
12203c038e6bSDominik Dingel {
12213c038e6bSDominik Dingel 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
12223c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
12233c038e6bSDominik Dingel }
12243c038e6bSDominik Dingel 
12253c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
12263c038e6bSDominik Dingel 			       struct kvm_async_pf *work)
12273c038e6bSDominik Dingel {
12283c038e6bSDominik Dingel 	/* s390 will always inject the page directly */
12293c038e6bSDominik Dingel }
12303c038e6bSDominik Dingel 
12313c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
12323c038e6bSDominik Dingel {
12333c038e6bSDominik Dingel 	/*
12343c038e6bSDominik Dingel 	 * s390 will always inject the page directly,
12353c038e6bSDominik Dingel 	 * but we still want check_async_completion to cleanup
12363c038e6bSDominik Dingel 	 * but we still want check_async_completion to clean up
12373c038e6bSDominik Dingel 	return true;
12383c038e6bSDominik Dingel }
12393c038e6bSDominik Dingel 
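/*
 * Editor-added summary comment (not part of the original blame data):
 * Set up asynchronous handling for the host fault that just occurred at
 * current->thread.gmap_addr. This is only done when the guest can accept
 * the pfault-init notification: a valid pfault token is configured, the
 * guest PSW matches the configured select/compare masks, external
 * interrupts are enabled, no interrupt is already pending for the vcpu,
 * the corresponding subclass-mask bit in CR0 (0x200) is set and pfault
 * handling is enabled for the gmap.
 */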
12403c038e6bSDominik Dingel static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
12413c038e6bSDominik Dingel {
12423c038e6bSDominik Dingel 	hva_t hva;
12433c038e6bSDominik Dingel 	struct kvm_arch_async_pf arch;
12443c038e6bSDominik Dingel 	int rc;
12453c038e6bSDominik Dingel 
12463c038e6bSDominik Dingel 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
12473c038e6bSDominik Dingel 		return 0;
12483c038e6bSDominik Dingel 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
12493c038e6bSDominik Dingel 	    vcpu->arch.pfault_compare)
12503c038e6bSDominik Dingel 		return 0;
12513c038e6bSDominik Dingel 	if (psw_extint_disabled(vcpu))
12523c038e6bSDominik Dingel 		return 0;
12539a022067SDavid Hildenbrand 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
12543c038e6bSDominik Dingel 		return 0;
12553c038e6bSDominik Dingel 	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
12563c038e6bSDominik Dingel 		return 0;
12573c038e6bSDominik Dingel 	if (!vcpu->arch.gmap->pfault_enabled)
12583c038e6bSDominik Dingel 		return 0;
12593c038e6bSDominik Dingel 
126081480cc1SHeiko Carstens 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
126181480cc1SHeiko Carstens 	hva += current->thread.gmap_addr & ~PAGE_MASK;
126281480cc1SHeiko Carstens 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
12633c038e6bSDominik Dingel 		return 0;
12643c038e6bSDominik Dingel 
12653c038e6bSDominik Dingel 	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
12663c038e6bSDominik Dingel 	return rc;
12673c038e6bSDominik Dingel }
12683c038e6bSDominik Dingel 
12693fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu)
1270b0c632dbSHeiko Carstens {
12713fb4c40fSThomas Huth 	int rc, cpuflags;
1272e168bf8dSCarsten Otte 
12733c038e6bSDominik Dingel 	/*
12743c038e6bSDominik Dingel 	 * On s390, notifications for arriving pages will be delivered directly
12753c038e6bSDominik Dingel 	 * to the guest, but the housekeeping for completed pfaults is
12763c038e6bSDominik Dingel 	 * handled outside the worker.
12773c038e6bSDominik Dingel 	 */
12783c038e6bSDominik Dingel 	kvm_check_async_pf_completion(vcpu);
12793c038e6bSDominik Dingel 
12805a32c1afSChristian Borntraeger 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
1281b0c632dbSHeiko Carstens 
1282b0c632dbSHeiko Carstens 	if (need_resched())
1283b0c632dbSHeiko Carstens 		schedule();
1284b0c632dbSHeiko Carstens 
1285d3a73acbSMartin Schwidefsky 	if (test_cpu_flag(CIF_MCCK_PENDING))
128671cde587SChristian Borntraeger 		s390_handle_mcck();
128771cde587SChristian Borntraeger 
128879395031SJens Freimann 	if (!kvm_is_ucontrol(vcpu->kvm)) {
128979395031SJens Freimann 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
129079395031SJens Freimann 		if (rc)
129179395031SJens Freimann 			return rc;
129279395031SJens Freimann 	}
12930ff31867SCarsten Otte 
12942c70fe44SChristian Borntraeger 	rc = kvm_s390_handle_requests(vcpu);
12952c70fe44SChristian Borntraeger 	if (rc)
12962c70fe44SChristian Borntraeger 		return rc;
12972c70fe44SChristian Borntraeger 
129827291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu)) {
129927291e21SDavid Hildenbrand 		kvm_s390_backup_guest_per_regs(vcpu);
130027291e21SDavid Hildenbrand 		kvm_s390_patch_guest_per_regs(vcpu);
130127291e21SDavid Hildenbrand 	}
130227291e21SDavid Hildenbrand 
1303b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icptcode = 0;
13043fb4c40fSThomas Huth 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
13053fb4c40fSThomas Huth 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
13063fb4c40fSThomas Huth 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
13072b29a9fdSDominik Dingel 
13083fb4c40fSThomas Huth 	return 0;
13093fb4c40fSThomas Huth }
13103fb4c40fSThomas Huth 
13113fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
13123fb4c40fSThomas Huth {
131324eb3a82SDominik Dingel 	int rc = -1;
13142b29a9fdSDominik Dingel 
13152b29a9fdSDominik Dingel 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
13162b29a9fdSDominik Dingel 		   vcpu->arch.sie_block->icptcode);
13172b29a9fdSDominik Dingel 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
13182b29a9fdSDominik Dingel 
131927291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu))
132027291e21SDavid Hildenbrand 		kvm_s390_restore_guest_per_regs(vcpu);
132127291e21SDavid Hildenbrand 
13223fb4c40fSThomas Huth 	if (exit_reason >= 0) {
13237c470539SMartin Schwidefsky 		rc = 0;
1324210b1607SThomas Huth 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
1325210b1607SThomas Huth 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
1326210b1607SThomas Huth 		vcpu->run->s390_ucontrol.trans_exc_code =
1327210b1607SThomas Huth 						current->thread.gmap_addr;
1328210b1607SThomas Huth 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
1329210b1607SThomas Huth 		rc = -EREMOTE;
133024eb3a82SDominik Dingel 
133124eb3a82SDominik Dingel 	} else if (current->thread.gmap_pfault) {
13323c038e6bSDominik Dingel 		trace_kvm_s390_major_guest_pfault(vcpu);
133324eb3a82SDominik Dingel 		current->thread.gmap_pfault = 0;
1334fa576c58SThomas Huth 		if (kvm_arch_setup_async_pf(vcpu)) {
133524eb3a82SDominik Dingel 			rc = 0;
1336fa576c58SThomas Huth 		} else {
1337fa576c58SThomas Huth 			gpa_t gpa = current->thread.gmap_addr;
1338fa576c58SThomas Huth 			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
1339fa576c58SThomas Huth 		}
134024eb3a82SDominik Dingel 	}
134124eb3a82SDominik Dingel 
134224eb3a82SDominik Dingel 	if (rc == -1) {
1343699bde3bSChristian Borntraeger 		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
1344699bde3bSChristian Borntraeger 		trace_kvm_s390_sie_fault(vcpu);
1345699bde3bSChristian Borntraeger 		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
13461f0d0f09SCarsten Otte 	}
1347b0c632dbSHeiko Carstens 
13485a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
13493fb4c40fSThomas Huth 
1350a76ccff6SThomas Huth 	if (rc == 0) {
1351a76ccff6SThomas Huth 		if (kvm_is_ucontrol(vcpu->kvm))
13522955c83fSChristian Borntraeger 			/* Don't exit for host interrupts. */
13532955c83fSChristian Borntraeger 			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
1354a76ccff6SThomas Huth 		else
1355a76ccff6SThomas Huth 			rc = kvm_handle_sie_intercept(vcpu);
1356a76ccff6SThomas Huth 	}
1357a76ccff6SThomas Huth 
13583fb4c40fSThomas Huth 	return rc;
13593fb4c40fSThomas Huth }
13603fb4c40fSThomas Huth 
13613fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu)
13623fb4c40fSThomas Huth {
13633fb4c40fSThomas Huth 	int rc, exit_reason;
13643fb4c40fSThomas Huth 
1365800c1065SThomas Huth 	/*
1366800c1065SThomas Huth 	 * We try to hold kvm->srcu during most of vcpu_run (except when
1367800c1065SThomas Huth 	 * running the guest), so that memslots (and other stuff) are protected
1368800c1065SThomas Huth 	 */
1369800c1065SThomas Huth 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1370800c1065SThomas Huth 
1371a76ccff6SThomas Huth 	do {
13723fb4c40fSThomas Huth 		rc = vcpu_pre_run(vcpu);
13733fb4c40fSThomas Huth 		if (rc)
1374a76ccff6SThomas Huth 			break;
13753fb4c40fSThomas Huth 
1376800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
13773fb4c40fSThomas Huth 		/*
1378a76ccff6SThomas Huth 		 * As PF_VCPU will be used in the fault handler, there must be
1379a76ccff6SThomas Huth 		 * no uaccess between guest_enter and guest_exit.
13803fb4c40fSThomas Huth 		 */
13813fb4c40fSThomas Huth 		preempt_disable();
13823fb4c40fSThomas Huth 		kvm_guest_enter();
13833fb4c40fSThomas Huth 		preempt_enable();
1384a76ccff6SThomas Huth 		exit_reason = sie64a(vcpu->arch.sie_block,
1385a76ccff6SThomas Huth 				     vcpu->run->s.regs.gprs);
13863fb4c40fSThomas Huth 		kvm_guest_exit();
1387800c1065SThomas Huth 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
13883fb4c40fSThomas Huth 
13893fb4c40fSThomas Huth 		rc = vcpu_post_run(vcpu, exit_reason);
139027291e21SDavid Hildenbrand 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
13913fb4c40fSThomas Huth 
1392800c1065SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1393e168bf8dSCarsten Otte 	return rc;
1394b0c632dbSHeiko Carstens }
1395b0c632dbSHeiko Carstens 
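/*
 * Editor-added summary comment (not part of the original blame data):
 * sync_regs - transfer the register state that user space provided in
 * kvm_run (the PSW plus any fields flagged in kvm_dirty_regs) into the
 * vcpu and its SIE control block before entering the guest.
 */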
1396b028ee3eSDavid Hildenbrand static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1397b028ee3eSDavid Hildenbrand {
1398b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
1399b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
1400b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
1401b028ee3eSDavid Hildenbrand 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
1402b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
1403b028ee3eSDavid Hildenbrand 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
1404d3d692c8SDavid Hildenbrand 		/* some control register changes require a tlb flush */
1405d3d692c8SDavid Hildenbrand 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1406b028ee3eSDavid Hildenbrand 	}
1407b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
1408b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
1409b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
1410b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
1411b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
1412b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
1413b028ee3eSDavid Hildenbrand 	}
1414b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
1415b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
1416b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
1417b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
14189fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
14199fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
1420b028ee3eSDavid Hildenbrand 	}
1421b028ee3eSDavid Hildenbrand 	kvm_run->kvm_dirty_regs = 0;
1422b028ee3eSDavid Hildenbrand }
1423b028ee3eSDavid Hildenbrand 
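/*
 * Editor-added summary comment (not part of the original blame data):
 * store_regs - copy the current vcpu/SIE state back into kvm_run so that
 * user space sees up-to-date values when KVM_RUN returns.
 */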
1424b028ee3eSDavid Hildenbrand static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1425b028ee3eSDavid Hildenbrand {
1426b028ee3eSDavid Hildenbrand 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
1427b028ee3eSDavid Hildenbrand 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
1428b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
1429b028ee3eSDavid Hildenbrand 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
1430b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
1431b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
1432b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
1433b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
1434b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
1435b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
1436b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
1437b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
1438b028ee3eSDavid Hildenbrand }
1439b028ee3eSDavid Hildenbrand 
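/*
 * Editor-added summary comment (not part of the original blame data):
 * Main KVM_RUN handler: sync registers from kvm_run, run the vcpu in the
 * SIE loop until an exit that must be handled by user space occurs, then
 * translate the result into an exit reason and store the registers back.
 */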
1440b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1441b0c632dbSHeiko Carstens {
14428f2abe6aSChristian Borntraeger 	int rc;
1443b0c632dbSHeiko Carstens 	sigset_t sigsaved;
1444b0c632dbSHeiko Carstens 
144527291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu)) {
144627291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
144727291e21SDavid Hildenbrand 		return 0;
144827291e21SDavid Hildenbrand 	}
144927291e21SDavid Hildenbrand 
1450b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
1451b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1452b0c632dbSHeiko Carstens 
14536352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
14546852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
14556352e4d2SDavid Hildenbrand 	} else if (is_vcpu_stopped(vcpu)) {
14566352e4d2SDavid Hildenbrand 		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
14576352e4d2SDavid Hildenbrand 				   vcpu->vcpu_id);
14586352e4d2SDavid Hildenbrand 		return -EINVAL;
14596352e4d2SDavid Hildenbrand 	}
1460b0c632dbSHeiko Carstens 
1461b028ee3eSDavid Hildenbrand 	sync_regs(vcpu, kvm_run);
1462d7b0b5ebSCarsten Otte 
1463dab4079dSHeiko Carstens 	might_fault();
1464e168bf8dSCarsten Otte 	rc = __vcpu_run(vcpu);
14659ace903dSChristian Ehrhardt 
1466b1d16c49SChristian Ehrhardt 	if (signal_pending(current) && !rc) {
1467b1d16c49SChristian Ehrhardt 		kvm_run->exit_reason = KVM_EXIT_INTR;
14688f2abe6aSChristian Borntraeger 		rc = -EINTR;
1469b1d16c49SChristian Ehrhardt 	}
14708f2abe6aSChristian Borntraeger 
147127291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu) && !rc)  {
147227291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
147327291e21SDavid Hildenbrand 		rc = 0;
147427291e21SDavid Hildenbrand 	}
147527291e21SDavid Hildenbrand 
1476b8e660b8SHeiko Carstens 	if (rc == -EOPNOTSUPP) {
14778f2abe6aSChristian Borntraeger 		/* intercept cannot be handled in-kernel, prepare kvm-run */
14788f2abe6aSChristian Borntraeger 		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
14798f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
14808f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
14818f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
14828f2abe6aSChristian Borntraeger 		rc = 0;
14838f2abe6aSChristian Borntraeger 	}
14848f2abe6aSChristian Borntraeger 
14858f2abe6aSChristian Borntraeger 	if (rc == -EREMOTE) {
14868f2abe6aSChristian Borntraeger 		/* intercept was handled, but userspace support is needed;
14878f2abe6aSChristian Borntraeger 		 * kvm_run has been prepared by the handler */
14888f2abe6aSChristian Borntraeger 		rc = 0;
14898f2abe6aSChristian Borntraeger 	}
14908f2abe6aSChristian Borntraeger 
1491b028ee3eSDavid Hildenbrand 	store_regs(vcpu, kvm_run);
1492d7b0b5ebSCarsten Otte 
1493b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
1494b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1495b0c632dbSHeiko Carstens 
1496b0c632dbSHeiko Carstens 	vcpu->stat.exit_userspace++;
14977e8e6ab4SHeiko Carstens 	return rc;
1498b0c632dbSHeiko Carstens }
1499b0c632dbSHeiko Carstens 
1500b0c632dbSHeiko Carstens /*
1501b0c632dbSHeiko Carstens  * store status at address
1502b0c632dbSHeiko Carstens  * we have two special cases:
1503b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
1504b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
1505b0c632dbSHeiko Carstens  */
1506d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
1507b0c632dbSHeiko Carstens {
1508092670cdSCarsten Otte 	unsigned char archmode = 1;
1509fda902cbSMichael Mueller 	unsigned int px;
1510178bd789SThomas Huth 	u64 clkcomp;
1511d0bce605SHeiko Carstens 	int rc;
1512b0c632dbSHeiko Carstens 
1513d0bce605SHeiko Carstens 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
1514d0bce605SHeiko Carstens 		if (write_guest_abs(vcpu, 163, &archmode, 1))
1515b0c632dbSHeiko Carstens 			return -EFAULT;
1516d0bce605SHeiko Carstens 		gpa = SAVE_AREA_BASE;
1517d0bce605SHeiko Carstens 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
1518d0bce605SHeiko Carstens 		if (write_guest_real(vcpu, 163, &archmode, 1))
1519b0c632dbSHeiko Carstens 			return -EFAULT;
1520d0bce605SHeiko Carstens 		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
1521d0bce605SHeiko Carstens 	}
1522d0bce605SHeiko Carstens 	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
1523d0bce605SHeiko Carstens 			     vcpu->arch.guest_fpregs.fprs, 128);
1524d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
1525d0bce605SHeiko Carstens 			      vcpu->run->s.regs.gprs, 128);
1526d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
1527d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gpsw, 16);
1528fda902cbSMichael Mueller 	px = kvm_s390_get_prefix(vcpu);
1529d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
1530fda902cbSMichael Mueller 			      &px, 4);
1531d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu,
1532d0bce605SHeiko Carstens 			      gpa + offsetof(struct save_area, fp_ctrl_reg),
1533d0bce605SHeiko Carstens 			      &vcpu->arch.guest_fpregs.fpc, 4);
1534d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
1535d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->todpr, 4);
1536d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
1537d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->cputm, 8);
1538178bd789SThomas Huth 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
1539d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
1540d0bce605SHeiko Carstens 			      &clkcomp, 8);
1541d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
1542d0bce605SHeiko Carstens 			      &vcpu->run->s.regs.acrs, 64);
1543d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
1544d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gcr, 128);
1545d0bce605SHeiko Carstens 	return rc ? -EFAULT : 0;
1546b0c632dbSHeiko Carstens }
1547b0c632dbSHeiko Carstens 
1548e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
1549e879892cSThomas Huth {
1550e879892cSThomas Huth 	/*
1551e879892cSThomas Huth 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
1552e879892cSThomas Huth 	 * copying in vcpu load/put. Let's update our copies before we save
1553e879892cSThomas Huth 	 * them into the save area
1554e879892cSThomas Huth 	 */
1555e879892cSThomas Huth 	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
1556e879892cSThomas Huth 	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
1557e879892cSThomas Huth 	save_access_regs(vcpu->run->s.regs.acrs);
1558e879892cSThomas Huth 
1559e879892cSThomas Huth 	return kvm_s390_store_status_unloaded(vcpu, addr);
1560e879892cSThomas Huth }
1561e879892cSThomas Huth 
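/*
 * Editor-added summary comment (not part of the original blame data):
 * Request that the IBS facility be switched off for this vcpu: cancel any
 * pending enable request, queue a disable request and kick the vcpu out of
 * SIE so the request is processed before the next guest entry.
 */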
15628ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
15638ad35755SDavid Hildenbrand {
15648ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
15658ad35755SDavid Hildenbrand 	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
15668ad35755SDavid Hildenbrand 	exit_sie_sync(vcpu);
15678ad35755SDavid Hildenbrand }
15688ad35755SDavid Hildenbrand 
15698ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
15708ad35755SDavid Hildenbrand {
15718ad35755SDavid Hildenbrand 	unsigned int i;
15728ad35755SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
15738ad35755SDavid Hildenbrand 
15748ad35755SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
15758ad35755SDavid Hildenbrand 		__disable_ibs_on_vcpu(vcpu);
15768ad35755SDavid Hildenbrand 	}
15778ad35755SDavid Hildenbrand }
15788ad35755SDavid Hildenbrand 
15798ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
15808ad35755SDavid Hildenbrand {
15818ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
15828ad35755SDavid Hildenbrand 	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
15838ad35755SDavid Hildenbrand 	exit_sie_sync(vcpu);
15848ad35755SDavid Hildenbrand }
15858ad35755SDavid Hildenbrand 
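/*
 * Editor-added summary comment (not part of the original blame data):
 * Move a vcpu from the STOPPED to the OPERATING state. If it becomes the
 * only running vcpu, IBS is enabled for it as an optimization; as soon as
 * a second vcpu is started, IBS is disabled on all vcpus again.
 */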
15866852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
15876852d7b6SDavid Hildenbrand {
15888ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
15898ad35755SDavid Hildenbrand 
15908ad35755SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
15918ad35755SDavid Hildenbrand 		return;
15928ad35755SDavid Hildenbrand 
15936852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
15948ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
1595433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
15968ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
15978ad35755SDavid Hildenbrand 
15988ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
15998ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
16008ad35755SDavid Hildenbrand 			started_vcpus++;
16018ad35755SDavid Hildenbrand 	}
16028ad35755SDavid Hildenbrand 
16038ad35755SDavid Hildenbrand 	if (started_vcpus == 0) {
16048ad35755SDavid Hildenbrand 		/* we're the only active VCPU -> speed it up */
16058ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(vcpu);
16068ad35755SDavid Hildenbrand 	} else if (started_vcpus == 1) {
16078ad35755SDavid Hildenbrand 		/*
16088ad35755SDavid Hildenbrand 		 * As we are starting a second VCPU, we have to disable
16098ad35755SDavid Hildenbrand 		 * the IBS facility on all VCPUs to remove potentially
16108ad35755SDavid Hildenbrand 		 * oustanding ENABLE requests.
16118ad35755SDavid Hildenbrand 		 * outstanding ENABLE requests.
16128ad35755SDavid Hildenbrand 		__disable_ibs_on_all_vcpus(vcpu->kvm);
16138ad35755SDavid Hildenbrand 	}
16148ad35755SDavid Hildenbrand 
16156852d7b6SDavid Hildenbrand 	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
16168ad35755SDavid Hildenbrand 	/*
16178ad35755SDavid Hildenbrand 	 * Another VCPU might have used IBS while we were offline.
16188ad35755SDavid Hildenbrand 	 * Let's play it safe and flush this VCPU's TLB at startup.
16198ad35755SDavid Hildenbrand 	 */
1620d3d692c8SDavid Hildenbrand 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1621433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
16228ad35755SDavid Hildenbrand 	return;
16236852d7b6SDavid Hildenbrand }
16246852d7b6SDavid Hildenbrand 
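/*
 * Editor-added summary comment (not part of the original blame data):
 * Move a vcpu from the OPERATING to the STOPPED state. If exactly one
 * other vcpu is left running afterwards, IBS is enabled for that vcpu.
 */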
16256852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
16266852d7b6SDavid Hildenbrand {
16278ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
16288ad35755SDavid Hildenbrand 	struct kvm_vcpu *started_vcpu = NULL;
16298ad35755SDavid Hildenbrand 
16308ad35755SDavid Hildenbrand 	if (is_vcpu_stopped(vcpu))
16318ad35755SDavid Hildenbrand 		return;
16328ad35755SDavid Hildenbrand 
16336852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
16348ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
1635433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
16368ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
16378ad35755SDavid Hildenbrand 
163932f5ff63SDavid Hildenbrand 	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
16396cddd432SDavid Hildenbrand 	kvm_s390_clear_stop_irq(vcpu);
164032f5ff63SDavid Hildenbrand 
16416cddd432SDavid Hildenbrand 	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
16428ad35755SDavid Hildenbrand 	__disable_ibs_on_vcpu(vcpu);
16438ad35755SDavid Hildenbrand 
16448ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
16458ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
16468ad35755SDavid Hildenbrand 			started_vcpus++;
16478ad35755SDavid Hildenbrand 			started_vcpu = vcpu->kvm->vcpus[i];
16488ad35755SDavid Hildenbrand 		}
16498ad35755SDavid Hildenbrand 	}
16508ad35755SDavid Hildenbrand 
16518ad35755SDavid Hildenbrand 	if (started_vcpus == 1) {
16528ad35755SDavid Hildenbrand 		/*
16538ad35755SDavid Hildenbrand 		 * As we only have one VCPU left, we want to enable the
16548ad35755SDavid Hildenbrand 		 * IBS facility for that VCPU to speed it up.
16558ad35755SDavid Hildenbrand 		 */
16568ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(started_vcpu);
16578ad35755SDavid Hildenbrand 	}
16588ad35755SDavid Hildenbrand 
1659433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
16608ad35755SDavid Hildenbrand 	return;
16616852d7b6SDavid Hildenbrand }
16626852d7b6SDavid Hildenbrand 
1663d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1664d6712df9SCornelia Huck 				     struct kvm_enable_cap *cap)
1665d6712df9SCornelia Huck {
1666d6712df9SCornelia Huck 	int r;
1667d6712df9SCornelia Huck 
1668d6712df9SCornelia Huck 	if (cap->flags)
1669d6712df9SCornelia Huck 		return -EINVAL;
1670d6712df9SCornelia Huck 
1671d6712df9SCornelia Huck 	switch (cap->cap) {
1672fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
1673fa6b7fe9SCornelia Huck 		if (!vcpu->kvm->arch.css_support) {
1674fa6b7fe9SCornelia Huck 			vcpu->kvm->arch.css_support = 1;
1675fa6b7fe9SCornelia Huck 			trace_kvm_s390_enable_css(vcpu->kvm);
1676fa6b7fe9SCornelia Huck 		}
1677fa6b7fe9SCornelia Huck 		r = 0;
1678fa6b7fe9SCornelia Huck 		break;
1679d6712df9SCornelia Huck 	default:
1680d6712df9SCornelia Huck 		r = -EINVAL;
1681d6712df9SCornelia Huck 		break;
1682d6712df9SCornelia Huck 	}
1683d6712df9SCornelia Huck 	return r;
1684d6712df9SCornelia Huck }
1685d6712df9SCornelia Huck 
1686b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp,
1687b0c632dbSHeiko Carstens 			 unsigned int ioctl, unsigned long arg)
1688b0c632dbSHeiko Carstens {
1689b0c632dbSHeiko Carstens 	struct kvm_vcpu *vcpu = filp->private_data;
1690b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
1691800c1065SThomas Huth 	int idx;
1692bc923cc9SAvi Kivity 	long r;
1693b0c632dbSHeiko Carstens 
169493736624SAvi Kivity 	switch (ioctl) {
169593736624SAvi Kivity 	case KVM_S390_INTERRUPT: {
1696ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
1697383d0b05SJens Freimann 		struct kvm_s390_irq s390irq;
1698ba5c1e9bSCarsten Otte 
169993736624SAvi Kivity 		r = -EFAULT;
1700ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
170193736624SAvi Kivity 			break;
1702383d0b05SJens Freimann 		if (s390int_to_s390irq(&s390int, &s390irq))
1703383d0b05SJens Freimann 			return -EINVAL;
1704383d0b05SJens Freimann 		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
170593736624SAvi Kivity 		break;
1706ba5c1e9bSCarsten Otte 	}
1707b0c632dbSHeiko Carstens 	case KVM_S390_STORE_STATUS:
1708800c1065SThomas Huth 		idx = srcu_read_lock(&vcpu->kvm->srcu);
1709bc923cc9SAvi Kivity 		r = kvm_s390_vcpu_store_status(vcpu, arg);
1710800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
1711bc923cc9SAvi Kivity 		break;
1712b0c632dbSHeiko Carstens 	case KVM_S390_SET_INITIAL_PSW: {
1713b0c632dbSHeiko Carstens 		psw_t psw;
1714b0c632dbSHeiko Carstens 
1715bc923cc9SAvi Kivity 		r = -EFAULT;
1716b0c632dbSHeiko Carstens 		if (copy_from_user(&psw, argp, sizeof(psw)))
1717bc923cc9SAvi Kivity 			break;
1718bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
1719bc923cc9SAvi Kivity 		break;
1720b0c632dbSHeiko Carstens 	}
1721b0c632dbSHeiko Carstens 	case KVM_S390_INITIAL_RESET:
1722bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
1723bc923cc9SAvi Kivity 		break;
172414eebd91SCarsten Otte 	case KVM_SET_ONE_REG:
172514eebd91SCarsten Otte 	case KVM_GET_ONE_REG: {
172614eebd91SCarsten Otte 		struct kvm_one_reg reg;
172714eebd91SCarsten Otte 		r = -EFAULT;
172814eebd91SCarsten Otte 		if (copy_from_user(&reg, argp, sizeof(reg)))
172914eebd91SCarsten Otte 			break;
173014eebd91SCarsten Otte 		if (ioctl == KVM_SET_ONE_REG)
173114eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
173214eebd91SCarsten Otte 		else
173314eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
173414eebd91SCarsten Otte 		break;
173514eebd91SCarsten Otte 	}
173627e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
173727e0393fSCarsten Otte 	case KVM_S390_UCAS_MAP: {
173827e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
173927e0393fSCarsten Otte 
174027e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
174127e0393fSCarsten Otte 			r = -EFAULT;
174227e0393fSCarsten Otte 			break;
174327e0393fSCarsten Otte 		}
174427e0393fSCarsten Otte 
174527e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
174627e0393fSCarsten Otte 			r = -EINVAL;
174727e0393fSCarsten Otte 			break;
174827e0393fSCarsten Otte 		}
174927e0393fSCarsten Otte 
175027e0393fSCarsten Otte 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
175127e0393fSCarsten Otte 				     ucasmap.vcpu_addr, ucasmap.length);
175227e0393fSCarsten Otte 		break;
175327e0393fSCarsten Otte 	}
175427e0393fSCarsten Otte 	case KVM_S390_UCAS_UNMAP: {
175527e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
175627e0393fSCarsten Otte 
175727e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
175827e0393fSCarsten Otte 			r = -EFAULT;
175927e0393fSCarsten Otte 			break;
176027e0393fSCarsten Otte 		}
176127e0393fSCarsten Otte 
176227e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
176327e0393fSCarsten Otte 			r = -EINVAL;
176427e0393fSCarsten Otte 			break;
176527e0393fSCarsten Otte 		}
176627e0393fSCarsten Otte 
176727e0393fSCarsten Otte 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
176827e0393fSCarsten Otte 			ucasmap.length);
176927e0393fSCarsten Otte 		break;
177027e0393fSCarsten Otte 	}
177127e0393fSCarsten Otte #endif
1772ccc7910fSCarsten Otte 	case KVM_S390_VCPU_FAULT: {
1773527e30b4SMartin Schwidefsky 		r = gmap_fault(vcpu->arch.gmap, arg, 0);
1774ccc7910fSCarsten Otte 		break;
1775ccc7910fSCarsten Otte 	}
1776d6712df9SCornelia Huck 	case KVM_ENABLE_CAP:
1777d6712df9SCornelia Huck 	{
1778d6712df9SCornelia Huck 		struct kvm_enable_cap cap;
1779d6712df9SCornelia Huck 		r = -EFAULT;
1780d6712df9SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
1781d6712df9SCornelia Huck 			break;
1782d6712df9SCornelia Huck 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1783d6712df9SCornelia Huck 		break;
1784d6712df9SCornelia Huck 	}
1785b0c632dbSHeiko Carstens 	default:
17863e6afcf1SCarsten Otte 		r = -ENOTTY;
1787b0c632dbSHeiko Carstens 	}
1788bc923cc9SAvi Kivity 	return r;
1789b0c632dbSHeiko Carstens }
1790b0c632dbSHeiko Carstens 
17915b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
17925b1c1493SCarsten Otte {
17935b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
17945b1c1493SCarsten Otte 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
17955b1c1493SCarsten Otte 		 && (kvm_is_ucontrol(vcpu->kvm))) {
17965b1c1493SCarsten Otte 		vmf->page = virt_to_page(vcpu->arch.sie_block);
17975b1c1493SCarsten Otte 		get_page(vmf->page);
17985b1c1493SCarsten Otte 		return 0;
17995b1c1493SCarsten Otte 	}
18005b1c1493SCarsten Otte #endif
18015b1c1493SCarsten Otte 	return VM_FAULT_SIGBUS;
18025b1c1493SCarsten Otte }
18035b1c1493SCarsten Otte 
18045587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
18055587027cSAneesh Kumar K.V 			    unsigned long npages)
1806db3fe4ebSTakuya Yoshikawa {
1807db3fe4ebSTakuya Yoshikawa 	return 0;
1808db3fe4ebSTakuya Yoshikawa }
1809db3fe4ebSTakuya Yoshikawa 
1810b0c632dbSHeiko Carstens /* Section: memory related */
1811f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
1812f7784b8eSMarcelo Tosatti 				   struct kvm_memory_slot *memslot,
18137b6195a9STakuya Yoshikawa 				   struct kvm_userspace_memory_region *mem,
18147b6195a9STakuya Yoshikawa 				   enum kvm_mr_change change)
1815b0c632dbSHeiko Carstens {
1816dd2887e7SNick Wang 	/* A few sanity checks. Memory slots have to start and end on a segment
1817dd2887e7SNick Wang 	   boundary (1MB). The memory in userland may be fragmented into
1818dd2887e7SNick Wang 	   various different vmas. It is okay to mmap() and munmap() memory
1819dd2887e7SNick Wang 	   in this slot after doing this call at any time */
1820b0c632dbSHeiko Carstens 
1821598841caSCarsten Otte 	if (mem->userspace_addr & 0xffffful)
1822b0c632dbSHeiko Carstens 		return -EINVAL;
1823b0c632dbSHeiko Carstens 
1824598841caSCarsten Otte 	if (mem->memory_size & 0xffffful)
1825b0c632dbSHeiko Carstens 		return -EINVAL;
1826b0c632dbSHeiko Carstens 
1827f7784b8eSMarcelo Tosatti 	return 0;
1828f7784b8eSMarcelo Tosatti }
1829f7784b8eSMarcelo Tosatti 
1830f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
1831f7784b8eSMarcelo Tosatti 				struct kvm_userspace_memory_region *mem,
18328482644aSTakuya Yoshikawa 				const struct kvm_memory_slot *old,
18338482644aSTakuya Yoshikawa 				enum kvm_mr_change change)
1834f7784b8eSMarcelo Tosatti {
1835f7850c92SCarsten Otte 	int rc;
1836f7784b8eSMarcelo Tosatti 
18372cef4debSChristian Borntraeger 	/* If the basics of the memslot do not change, we do not want
18382cef4debSChristian Borntraeger 	 * to update the gmap. Every update causes several unnecessary
18392cef4debSChristian Borntraeger 	 * segment translation exceptions. This is usually handled just
18402cef4debSChristian Borntraeger 	 * fine by the normal fault handler + gmap, but it will also
18412cef4debSChristian Borntraeger 	 * cause faults on the prefix page of running guest CPUs.
18422cef4debSChristian Borntraeger 	 */
18432cef4debSChristian Borntraeger 	if (old->userspace_addr == mem->userspace_addr &&
18442cef4debSChristian Borntraeger 	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
18452cef4debSChristian Borntraeger 	    old->npages * PAGE_SIZE == mem->memory_size)
18462cef4debSChristian Borntraeger 		return;
1847598841caSCarsten Otte 
1848598841caSCarsten Otte 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
1849598841caSCarsten Otte 		mem->guest_phys_addr, mem->memory_size);
1850598841caSCarsten Otte 	if (rc)
1851f7850c92SCarsten Otte 		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
1852598841caSCarsten Otte 	return;
1853b0c632dbSHeiko Carstens }
1854b0c632dbSHeiko Carstens 
1855b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
1856b0c632dbSHeiko Carstens {
1857ef50f7acSChristian Borntraeger 	int ret;
18580ee75beaSAvi Kivity 	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1859ef50f7acSChristian Borntraeger 	if (ret)
1860ef50f7acSChristian Borntraeger 		return ret;
1861ef50f7acSChristian Borntraeger 
1862ef50f7acSChristian Borntraeger 	/*
1863ef50f7acSChristian Borntraeger 	 * guests can ask for up to 255+1 double words, so we need a full page
186425985edcSLucas De Marchi 	 * to hold the maximum number of facilities. On the other hand, we
1865ef50f7acSChristian Borntraeger 	 * only set facilities that are known to work in KVM.
1866ef50f7acSChristian Borntraeger 	 */
186778c4b59fSMichael Mueller 	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
186878c4b59fSMichael Mueller 	if (!vfacilities) {
1869ef50f7acSChristian Borntraeger 		kvm_exit();
1870ef50f7acSChristian Borntraeger 		return -ENOMEM;
1871ef50f7acSChristian Borntraeger 	}
187278c4b59fSMichael Mueller 	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
18737be81a46SChristian Borntraeger 	vfacilities[0] &= 0xff82fffbf47c2000UL;
18747feb6bb8SMichael Mueller 	vfacilities[1] &= 0x005c000000000000UL;
1875ef50f7acSChristian Borntraeger 	return 0;
1876b0c632dbSHeiko Carstens }
1877b0c632dbSHeiko Carstens 
1878b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void)
1879b0c632dbSHeiko Carstens {
188078c4b59fSMichael Mueller 	free_page((unsigned long) vfacilities);
1881b0c632dbSHeiko Carstens 	kvm_exit();
1882b0c632dbSHeiko Carstens }
1883b0c632dbSHeiko Carstens 
1884b0c632dbSHeiko Carstens module_init(kvm_s390_init);
1885b0c632dbSHeiko Carstens module_exit(kvm_s390_exit);
1886566af940SCornelia Huck 
1887566af940SCornelia Huck /*
1888566af940SCornelia Huck  * Enable autoloading of the kvm module.
1889566af940SCornelia Huck  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
1890566af940SCornelia Huck  * since x86 takes a different approach.
1891566af940SCornelia Huck  */
1892566af940SCornelia Huck #include <linux/miscdevice.h>
1893566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR);
1894566af940SCornelia Huck MODULE_ALIAS("devname:kvm");
1895