xref: /openbmc/linux/arch/s390/kvm/kvm-s390.c (revision 72f250206f0f291190ab7f54e4d92ab211779929)
1b0c632dbSHeiko Carstens /*
2a53c8fabSHeiko Carstens  * hosting zSeries kernel virtual machines
3b0c632dbSHeiko Carstens  *
4628eb9b8SChristian Ehrhardt  * Copyright IBM Corp. 2008, 2009
5b0c632dbSHeiko Carstens  *
6b0c632dbSHeiko Carstens  * This program is free software; you can redistribute it and/or modify
7b0c632dbSHeiko Carstens  * it under the terms of the GNU General Public License (version 2 only)
8b0c632dbSHeiko Carstens  * as published by the Free Software Foundation.
9b0c632dbSHeiko Carstens  *
10b0c632dbSHeiko Carstens  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11b0c632dbSHeiko Carstens  *               Christian Borntraeger <borntraeger@de.ibm.com>
12b0c632dbSHeiko Carstens  *               Heiko Carstens <heiko.carstens@de.ibm.com>
13628eb9b8SChristian Ehrhardt  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
1415f36ebdSJason J. Herne  *               Jason J. Herne <jjherne@us.ibm.com>
15b0c632dbSHeiko Carstens  */
16b0c632dbSHeiko Carstens 
17b0c632dbSHeiko Carstens #include <linux/compiler.h>
18b0c632dbSHeiko Carstens #include <linux/err.h>
19b0c632dbSHeiko Carstens #include <linux/fs.h>
20ca872302SChristian Borntraeger #include <linux/hrtimer.h>
21b0c632dbSHeiko Carstens #include <linux/init.h>
22b0c632dbSHeiko Carstens #include <linux/kvm.h>
23b0c632dbSHeiko Carstens #include <linux/kvm_host.h>
24b0c632dbSHeiko Carstens #include <linux/module.h>
25b0c632dbSHeiko Carstens #include <linux/slab.h>
26ba5c1e9bSCarsten Otte #include <linux/timer.h>
27cbb870c8SHeiko Carstens #include <asm/asm-offsets.h>
28b0c632dbSHeiko Carstens #include <asm/lowcore.h>
29b0c632dbSHeiko Carstens #include <asm/pgtable.h>
30f5daba1dSHeiko Carstens #include <asm/nmi.h>
31a0616cdeSDavid Howells #include <asm/switch_to.h>
3278c4b59fSMichael Mueller #include <asm/facility.h>
331526bf9cSChristian Borntraeger #include <asm/sclp.h>
348f2abe6aSChristian Borntraeger #include "kvm-s390.h"
35b0c632dbSHeiko Carstens #include "gaccess.h"
36b0c632dbSHeiko Carstens 
375786fffaSCornelia Huck #define CREATE_TRACE_POINTS
385786fffaSCornelia Huck #include "trace.h"
39ade38c31SCornelia Huck #include "trace-s390.h"
405786fffaSCornelia Huck 
41b0c632dbSHeiko Carstens #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
42b0c632dbSHeiko Carstens 
43b0c632dbSHeiko Carstens struct kvm_stats_debugfs_item debugfs_entries[] = {
44b0c632dbSHeiko Carstens 	{ "userspace_handled", VCPU_STAT(exit_userspace) },
450eaeafa1SChristian Borntraeger 	{ "exit_null", VCPU_STAT(exit_null) },
468f2abe6aSChristian Borntraeger 	{ "exit_validity", VCPU_STAT(exit_validity) },
478f2abe6aSChristian Borntraeger 	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
488f2abe6aSChristian Borntraeger 	{ "exit_external_request", VCPU_STAT(exit_external_request) },
498f2abe6aSChristian Borntraeger 	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
50ba5c1e9bSCarsten Otte 	{ "exit_instruction", VCPU_STAT(exit_instruction) },
51ba5c1e9bSCarsten Otte 	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
52ba5c1e9bSCarsten Otte 	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
53ce2e4f0bSDavid Hildenbrand 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
54f5e10b09SChristian Borntraeger 	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
55ba5c1e9bSCarsten Otte 	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
56aba07508SDavid Hildenbrand 	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
57aba07508SDavid Hildenbrand 	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
58ba5c1e9bSCarsten Otte 	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
597697e71fSChristian Ehrhardt 	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
60ba5c1e9bSCarsten Otte 	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
61ba5c1e9bSCarsten Otte 	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
62ba5c1e9bSCarsten Otte 	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
63ba5c1e9bSCarsten Otte 	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
64ba5c1e9bSCarsten Otte 	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
65ba5c1e9bSCarsten Otte 	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
66ba5c1e9bSCarsten Otte 	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
6769d0d3a3SChristian Borntraeger 	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
68453423dcSChristian Borntraeger 	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
69453423dcSChristian Borntraeger 	{ "instruction_spx", VCPU_STAT(instruction_spx) },
70453423dcSChristian Borntraeger 	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
71453423dcSChristian Borntraeger 	{ "instruction_stap", VCPU_STAT(instruction_stap) },
72453423dcSChristian Borntraeger 	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
738a242234SHeiko Carstens 	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
74453423dcSChristian Borntraeger 	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
75453423dcSChristian Borntraeger 	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
76b31288faSKonstantin Weitz 	{ "instruction_essa", VCPU_STAT(instruction_essa) },
77453423dcSChristian Borntraeger 	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
78453423dcSChristian Borntraeger 	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
79bb25b9baSChristian Borntraeger 	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
805288fbf0SChristian Borntraeger 	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
81bd59d3a4SCornelia Huck 	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
827697e71fSChristian Ehrhardt 	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
835288fbf0SChristian Borntraeger 	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
8442cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
8542cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
865288fbf0SChristian Borntraeger 	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
8742cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
8842cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
895288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
905288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
915288fbf0SChristian Borntraeger 	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
9242cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
9342cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
9442cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
95388186bcSChristian Borntraeger 	{ "diagnose_10", VCPU_STAT(diagnose_10) },
96e28acfeaSChristian Borntraeger 	{ "diagnose_44", VCPU_STAT(diagnose_44) },
9741628d33SKonstantin Weitz 	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
98b0c632dbSHeiko Carstens 	{ NULL }
99b0c632dbSHeiko Carstens };
100b0c632dbSHeiko Carstens 
10178c4b59fSMichael Mueller unsigned long *vfacilities;
1022c70fe44SChristian Borntraeger static struct gmap_notifier gmap_notifier;
103b0c632dbSHeiko Carstens 
10478c4b59fSMichael Mueller /* test availability of vfacility */
105280ef0f1SHeiko Carstens int test_vfacility(unsigned long nr)
10678c4b59fSMichael Mueller {
10778c4b59fSMichael Mueller 	return __test_facility(nr, (void *) vfacilities);
10878c4b59fSMichael Mueller }
10978c4b59fSMichael Mueller 
110b0c632dbSHeiko Carstens /* Section: not file related */
11113a34e06SRadim Krčmář int kvm_arch_hardware_enable(void)
112b0c632dbSHeiko Carstens {
113b0c632dbSHeiko Carstens 	/* every s390 is virtualization enabled ;-) */
11410474ae8SAlexander Graf 	return 0;
115b0c632dbSHeiko Carstens }
116b0c632dbSHeiko Carstens 
1172c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
1182c70fe44SChristian Borntraeger 
119b0c632dbSHeiko Carstens int kvm_arch_hardware_setup(void)
120b0c632dbSHeiko Carstens {
1212c70fe44SChristian Borntraeger 	gmap_notifier.notifier_call = kvm_gmap_notifier;
1222c70fe44SChristian Borntraeger 	gmap_register_ipte_notifier(&gmap_notifier);
123b0c632dbSHeiko Carstens 	return 0;
124b0c632dbSHeiko Carstens }
125b0c632dbSHeiko Carstens 
126b0c632dbSHeiko Carstens void kvm_arch_hardware_unsetup(void)
127b0c632dbSHeiko Carstens {
1282c70fe44SChristian Borntraeger 	gmap_unregister_ipte_notifier(&gmap_notifier);
129b0c632dbSHeiko Carstens }
130b0c632dbSHeiko Carstens 
131b0c632dbSHeiko Carstens int kvm_arch_init(void *opaque)
132b0c632dbSHeiko Carstens {
13384877d93SCornelia Huck 	/* Register floating interrupt controller interface. */
13484877d93SCornelia Huck 	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
135b0c632dbSHeiko Carstens }
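
/*
 * Usage sketch (userspace, illustrative only): the FLIC registered above is
 * instantiated per VM with KVM_CREATE_DEVICE. A minimal helper, assuming a
 * vm_fd obtained with KVM_CREATE_VM; helper name is ours, error handling trimmed.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int create_flic(int vm_fd)
{
	struct kvm_create_device cd = {
		.type = KVM_DEV_TYPE_FLIC,	/* routed to kvm_flic_ops */
	};

	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0) {
		perror("KVM_CREATE_DEVICE");
		return -1;
	}
	return cd.fd;	/* device fd, usable with KVM_SET_DEVICE_ATTR etc. */
}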
136b0c632dbSHeiko Carstens 
137b0c632dbSHeiko Carstens /* Section: device related */
138b0c632dbSHeiko Carstens long kvm_arch_dev_ioctl(struct file *filp,
139b0c632dbSHeiko Carstens 			unsigned int ioctl, unsigned long arg)
140b0c632dbSHeiko Carstens {
141b0c632dbSHeiko Carstens 	if (ioctl == KVM_S390_ENABLE_SIE)
142b0c632dbSHeiko Carstens 		return s390_enable_sie();
143b0c632dbSHeiko Carstens 	return -EINVAL;
144b0c632dbSHeiko Carstens }
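
/*
 * Usage sketch: KVM_S390_ENABLE_SIE is a system ioctl issued on the /dev/kvm
 * file descriptor and lands in kvm_arch_dev_ioctl() above. Minimal,
 * illustrative example; it asks the kernel to convert the calling process's
 * address space for SIE use.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_sie(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);

	if (kvm_fd < 0 || ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0) < 0) {
		perror("KVM_S390_ENABLE_SIE");
		return -1;
	}
	return kvm_fd;
}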
145b0c632dbSHeiko Carstens 
146784aa3d7SAlexander Graf int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
147b0c632dbSHeiko Carstens {
148d7b0b5ebSCarsten Otte 	int r;
149d7b0b5ebSCarsten Otte 
1502bd0ac4eSCarsten Otte 	switch (ext) {
151d7b0b5ebSCarsten Otte 	case KVM_CAP_S390_PSW:
152b6cf8788SChristian Borntraeger 	case KVM_CAP_S390_GMAP:
15352e16b18SChristian Borntraeger 	case KVM_CAP_SYNC_MMU:
1541efd0f59SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
1551efd0f59SCarsten Otte 	case KVM_CAP_S390_UCONTROL:
1561efd0f59SCarsten Otte #endif
1573c038e6bSDominik Dingel 	case KVM_CAP_ASYNC_PF:
15860b413c9SChristian Borntraeger 	case KVM_CAP_SYNC_REGS:
15914eebd91SCarsten Otte 	case KVM_CAP_ONE_REG:
160d6712df9SCornelia Huck 	case KVM_CAP_ENABLE_CAP:
161fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
162ebc32262SCornelia Huck 	case KVM_CAP_IRQFD:
16310ccaa1eSCornelia Huck 	case KVM_CAP_IOEVENTFD:
164c05c4186SJens Freimann 	case KVM_CAP_DEVICE_CTRL:
165d938dc55SCornelia Huck 	case KVM_CAP_ENABLE_CAP_VM:
16678599d90SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
167f2061656SDominik Dingel 	case KVM_CAP_VM_ATTRIBUTES:
1686352e4d2SDavid Hildenbrand 	case KVM_CAP_MP_STATE:
1692444b352SDavid Hildenbrand 	case KVM_CAP_S390_USER_SIGP:
170d7b0b5ebSCarsten Otte 		r = 1;
171d7b0b5ebSCarsten Otte 		break;
172e726b1bdSChristian Borntraeger 	case KVM_CAP_NR_VCPUS:
173e726b1bdSChristian Borntraeger 	case KVM_CAP_MAX_VCPUS:
174e726b1bdSChristian Borntraeger 		r = KVM_MAX_VCPUS;
175e726b1bdSChristian Borntraeger 		break;
176e1e2e605SNick Wang 	case KVM_CAP_NR_MEMSLOTS:
177e1e2e605SNick Wang 		r = KVM_USER_MEM_SLOTS;
178e1e2e605SNick Wang 		break;
1791526bf9cSChristian Borntraeger 	case KVM_CAP_S390_COW:
180abf09bedSMartin Schwidefsky 		r = MACHINE_HAS_ESOP;
1811526bf9cSChristian Borntraeger 		break;
1822bd0ac4eSCarsten Otte 	default:
183d7b0b5ebSCarsten Otte 		r = 0;
184b0c632dbSHeiko Carstens 	}
185d7b0b5ebSCarsten Otte 	return r;
1862bd0ac4eSCarsten Otte }
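
/*
 * Usage sketch: the capabilities advertised above are probed from userspace
 * with KVM_CHECK_EXTENSION, on either /dev/kvm or the vm_fd. Illustrative
 * helper; assumes a linux/kvm.h that defines KVM_CAP_S390_USER_SIGP.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void probe_user_sigp(int vm_fd)
{
	int r = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_USER_SIGP);

	printf("KVM_CAP_S390_USER_SIGP: %s\n", r > 0 ? "available" : "not available");
}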
187b0c632dbSHeiko Carstens 
18815f36ebdSJason J. Herne static void kvm_s390_sync_dirty_log(struct kvm *kvm,
18915f36ebdSJason J. Herne 					struct kvm_memory_slot *memslot)
19015f36ebdSJason J. Herne {
19115f36ebdSJason J. Herne 	gfn_t cur_gfn, last_gfn;
19215f36ebdSJason J. Herne 	unsigned long address;
19315f36ebdSJason J. Herne 	struct gmap *gmap = kvm->arch.gmap;
19415f36ebdSJason J. Herne 
19515f36ebdSJason J. Herne 	down_read(&gmap->mm->mmap_sem);
19615f36ebdSJason J. Herne 	/* Loop over all guest pages */
19715f36ebdSJason J. Herne 	last_gfn = memslot->base_gfn + memslot->npages;
19815f36ebdSJason J. Herne 	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
19915f36ebdSJason J. Herne 		address = gfn_to_hva_memslot(memslot, cur_gfn);
20015f36ebdSJason J. Herne 
20115f36ebdSJason J. Herne 		if (gmap_test_and_clear_dirty(address, gmap))
20215f36ebdSJason J. Herne 			mark_page_dirty(kvm, cur_gfn);
20315f36ebdSJason J. Herne 	}
20415f36ebdSJason J. Herne 	up_read(&gmap->mm->mmap_sem);
20515f36ebdSJason J. Herne }
20615f36ebdSJason J. Herne 
207b0c632dbSHeiko Carstens /* Section: vm related */
208b0c632dbSHeiko Carstens /*
209b0c632dbSHeiko Carstens  * Get (and clear) the dirty memory log for a memory slot.
210b0c632dbSHeiko Carstens  */
211b0c632dbSHeiko Carstens int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
212b0c632dbSHeiko Carstens 			       struct kvm_dirty_log *log)
213b0c632dbSHeiko Carstens {
21415f36ebdSJason J. Herne 	int r;
21515f36ebdSJason J. Herne 	unsigned long n;
21615f36ebdSJason J. Herne 	struct kvm_memory_slot *memslot;
21715f36ebdSJason J. Herne 	int is_dirty = 0;
21815f36ebdSJason J. Herne 
21915f36ebdSJason J. Herne 	mutex_lock(&kvm->slots_lock);
22015f36ebdSJason J. Herne 
22115f36ebdSJason J. Herne 	r = -EINVAL;
22215f36ebdSJason J. Herne 	if (log->slot >= KVM_USER_MEM_SLOTS)
22315f36ebdSJason J. Herne 		goto out;
22415f36ebdSJason J. Herne 
22515f36ebdSJason J. Herne 	memslot = id_to_memslot(kvm->memslots, log->slot);
22615f36ebdSJason J. Herne 	r = -ENOENT;
22715f36ebdSJason J. Herne 	if (!memslot->dirty_bitmap)
22815f36ebdSJason J. Herne 		goto out;
22915f36ebdSJason J. Herne 
23015f36ebdSJason J. Herne 	kvm_s390_sync_dirty_log(kvm, memslot);
23115f36ebdSJason J. Herne 	r = kvm_get_dirty_log(kvm, log, &is_dirty);
23215f36ebdSJason J. Herne 	if (r)
23315f36ebdSJason J. Herne 		goto out;
23415f36ebdSJason J. Herne 
23515f36ebdSJason J. Herne 	/* Clear the dirty log */
23615f36ebdSJason J. Herne 	if (is_dirty) {
23715f36ebdSJason J. Herne 		n = kvm_dirty_bitmap_bytes(memslot);
23815f36ebdSJason J. Herne 		memset(memslot->dirty_bitmap, 0, n);
23915f36ebdSJason J. Herne 	}
24015f36ebdSJason J. Herne 	r = 0;
24115f36ebdSJason J. Herne out:
24215f36ebdSJason J. Herne 	mutex_unlock(&kvm->slots_lock);
24315f36ebdSJason J. Herne 	return r;
244b0c632dbSHeiko Carstens }
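
/*
 * Usage sketch: kvm_vm_ioctl_get_dirty_log() above is reached through the
 * KVM_GET_DIRTY_LOG vm ioctl. Minimal, illustrative helper; assumes a vm_fd
 * and a memory slot of `npages` pages set up via KVM_SET_USER_MEMORY_REGION.
 */
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int fetch_dirty_log(int vm_fd, unsigned int slot, unsigned long npages)
{
	size_t bitmap_bytes = ((npages + 63) / 64) * 8;	/* one bit per page, u64-aligned */
	struct kvm_dirty_log log = {
		.slot = slot,
		.dirty_bitmap = calloc(1, bitmap_bytes),
	};
	int r;

	if (!log.dirty_bitmap)
		return -1;
	r = ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
	/* ... scan log.dirty_bitmap here; the kernel has already cleared its copy ... */
	free(log.dirty_bitmap);
	return r;
}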
245b0c632dbSHeiko Carstens 
246d938dc55SCornelia Huck static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
247d938dc55SCornelia Huck {
248d938dc55SCornelia Huck 	int r;
249d938dc55SCornelia Huck 
250d938dc55SCornelia Huck 	if (cap->flags)
251d938dc55SCornelia Huck 		return -EINVAL;
252d938dc55SCornelia Huck 
253d938dc55SCornelia Huck 	switch (cap->cap) {
25484223598SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
25584223598SCornelia Huck 		kvm->arch.use_irqchip = 1;
25684223598SCornelia Huck 		r = 0;
25784223598SCornelia Huck 		break;
2582444b352SDavid Hildenbrand 	case KVM_CAP_S390_USER_SIGP:
2592444b352SDavid Hildenbrand 		kvm->arch.user_sigp = 1;
2602444b352SDavid Hildenbrand 		r = 0;
2612444b352SDavid Hildenbrand 		break;
262d938dc55SCornelia Huck 	default:
263d938dc55SCornelia Huck 		r = -EINVAL;
264d938dc55SCornelia Huck 		break;
265d938dc55SCornelia Huck 	}
266d938dc55SCornelia Huck 	return r;
267d938dc55SCornelia Huck }
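
/*
 * Usage sketch: the per-VM capabilities handled above are switched on with the
 * KVM_ENABLE_CAP vm ioctl. Illustrative helper for the in-kernel irqchip;
 * assumes a vm_fd from KVM_CREATE_VM.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_irqchip(int vm_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_S390_IRQCHIP;	/* flags and args must remain zero */
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}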
268d938dc55SCornelia Huck 
2698c0a7ce6SDominik Dingel static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
2708c0a7ce6SDominik Dingel {
2718c0a7ce6SDominik Dingel 	int ret;
2728c0a7ce6SDominik Dingel 
2738c0a7ce6SDominik Dingel 	switch (attr->attr) {
2748c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE:
2758c0a7ce6SDominik Dingel 		ret = 0;
2768c0a7ce6SDominik Dingel 		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
2778c0a7ce6SDominik Dingel 			ret = -EFAULT;
2788c0a7ce6SDominik Dingel 		break;
2798c0a7ce6SDominik Dingel 	default:
2808c0a7ce6SDominik Dingel 		ret = -ENXIO;
2818c0a7ce6SDominik Dingel 		break;
2828c0a7ce6SDominik Dingel 	}
2838c0a7ce6SDominik Dingel 	return ret;
2848c0a7ce6SDominik Dingel }
2858c0a7ce6SDominik Dingel 
2868c0a7ce6SDominik Dingel static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
2874f718eabSDominik Dingel {
2884f718eabSDominik Dingel 	int ret;
2894f718eabSDominik Dingel 	unsigned int idx;
2904f718eabSDominik Dingel 	switch (attr->attr) {
2914f718eabSDominik Dingel 	case KVM_S390_VM_MEM_ENABLE_CMMA:
2924f718eabSDominik Dingel 		ret = -EBUSY;
2934f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
2944f718eabSDominik Dingel 		if (atomic_read(&kvm->online_vcpus) == 0) {
2954f718eabSDominik Dingel 			kvm->arch.use_cmma = 1;
2964f718eabSDominik Dingel 			ret = 0;
2974f718eabSDominik Dingel 		}
2984f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
2994f718eabSDominik Dingel 		break;
3004f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CLR_CMMA:
3014f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
3024f718eabSDominik Dingel 		idx = srcu_read_lock(&kvm->srcu);
303a13cff31SDominik Dingel 		s390_reset_cmma(kvm->arch.gmap->mm);
3044f718eabSDominik Dingel 		srcu_read_unlock(&kvm->srcu, idx);
3054f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
3064f718eabSDominik Dingel 		ret = 0;
3074f718eabSDominik Dingel 		break;
3088c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE: {
3098c0a7ce6SDominik Dingel 		unsigned long new_limit;
3108c0a7ce6SDominik Dingel 
3118c0a7ce6SDominik Dingel 		if (kvm_is_ucontrol(kvm))
3128c0a7ce6SDominik Dingel 			return -EINVAL;
3138c0a7ce6SDominik Dingel 
3148c0a7ce6SDominik Dingel 		if (get_user(new_limit, (u64 __user *)attr->addr))
3158c0a7ce6SDominik Dingel 			return -EFAULT;
3168c0a7ce6SDominik Dingel 
3178c0a7ce6SDominik Dingel 		if (new_limit > kvm->arch.gmap->asce_end)
3188c0a7ce6SDominik Dingel 			return -E2BIG;
3198c0a7ce6SDominik Dingel 
3208c0a7ce6SDominik Dingel 		ret = -EBUSY;
3218c0a7ce6SDominik Dingel 		mutex_lock(&kvm->lock);
3228c0a7ce6SDominik Dingel 		if (atomic_read(&kvm->online_vcpus) == 0) {
3238c0a7ce6SDominik Dingel 			/* gmap_alloc will round the limit up */
3248c0a7ce6SDominik Dingel 			struct gmap *new = gmap_alloc(current->mm, new_limit);
3258c0a7ce6SDominik Dingel 
3268c0a7ce6SDominik Dingel 			if (!new) {
3278c0a7ce6SDominik Dingel 				ret = -ENOMEM;
3288c0a7ce6SDominik Dingel 			} else {
3298c0a7ce6SDominik Dingel 				gmap_free(kvm->arch.gmap);
3308c0a7ce6SDominik Dingel 				new->private = kvm;
3318c0a7ce6SDominik Dingel 				kvm->arch.gmap = new;
3328c0a7ce6SDominik Dingel 				ret = 0;
3338c0a7ce6SDominik Dingel 			}
3348c0a7ce6SDominik Dingel 		}
3358c0a7ce6SDominik Dingel 		mutex_unlock(&kvm->lock);
3368c0a7ce6SDominik Dingel 		break;
3378c0a7ce6SDominik Dingel 	}
3384f718eabSDominik Dingel 	default:
3394f718eabSDominik Dingel 		ret = -ENXIO;
3404f718eabSDominik Dingel 		break;
3414f718eabSDominik Dingel 	}
3424f718eabSDominik Dingel 	return ret;
3434f718eabSDominik Dingel }
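
/*
 * Usage sketch: the memory-control attributes above are driven through
 * KVM_SET_DEVICE_ATTR on the vm_fd (see kvm_arch_vm_ioctl() further down).
 * Illustrative helper that caps guest memory; only accepted while no VCPUs
 * exist. Assumes uapi headers defining KVM_S390_VM_MEM_CTRL and
 * KVM_S390_VM_MEM_LIMIT_SIZE.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_mem_limit(int vm_fd, uint64_t new_limit)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_MEM_CTRL,
		.attr  = KVM_S390_VM_MEM_LIMIT_SIZE,
		.addr  = (uint64_t)(uintptr_t)&new_limit,	/* kernel reads a u64 from this address */
	};

	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}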
3444f718eabSDominik Dingel 
345*72f25020SJason J. Herne static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
346*72f25020SJason J. Herne {
347*72f25020SJason J. Herne 	u8 gtod_high;
348*72f25020SJason J. Herne 
349*72f25020SJason J. Herne 	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
350*72f25020SJason J. Herne 					   sizeof(gtod_high)))
351*72f25020SJason J. Herne 		return -EFAULT;
352*72f25020SJason J. Herne 
353*72f25020SJason J. Herne 	if (gtod_high != 0)
354*72f25020SJason J. Herne 		return -EINVAL;
355*72f25020SJason J. Herne 
356*72f25020SJason J. Herne 	return 0;
357*72f25020SJason J. Herne }
358*72f25020SJason J. Herne 
359*72f25020SJason J. Herne static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
360*72f25020SJason J. Herne {
361*72f25020SJason J. Herne 	struct kvm_vcpu *cur_vcpu;
362*72f25020SJason J. Herne 	unsigned int vcpu_idx;
363*72f25020SJason J. Herne 	u64 host_tod, gtod;
364*72f25020SJason J. Herne 	int r;
365*72f25020SJason J. Herne 
366*72f25020SJason J. Herne 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
367*72f25020SJason J. Herne 		return -EFAULT;
368*72f25020SJason J. Herne 
369*72f25020SJason J. Herne 	r = store_tod_clock(&host_tod);
370*72f25020SJason J. Herne 	if (r)
371*72f25020SJason J. Herne 		return r;
372*72f25020SJason J. Herne 
373*72f25020SJason J. Herne 	mutex_lock(&kvm->lock);
374*72f25020SJason J. Herne 	kvm->arch.epoch = gtod - host_tod;
375*72f25020SJason J. Herne 	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
376*72f25020SJason J. Herne 		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
377*72f25020SJason J. Herne 		exit_sie(cur_vcpu);
378*72f25020SJason J. Herne 	}
379*72f25020SJason J. Herne 	mutex_unlock(&kvm->lock);
380*72f25020SJason J. Herne 	return 0;
381*72f25020SJason J. Herne }
382*72f25020SJason J. Herne 
383*72f25020SJason J. Herne static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
384*72f25020SJason J. Herne {
385*72f25020SJason J. Herne 	int ret;
386*72f25020SJason J. Herne 
387*72f25020SJason J. Herne 	if (attr->flags)
388*72f25020SJason J. Herne 		return -EINVAL;
389*72f25020SJason J. Herne 
390*72f25020SJason J. Herne 	switch (attr->attr) {
391*72f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
392*72f25020SJason J. Herne 		ret = kvm_s390_set_tod_high(kvm, attr);
393*72f25020SJason J. Herne 		break;
394*72f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
395*72f25020SJason J. Herne 		ret = kvm_s390_set_tod_low(kvm, attr);
396*72f25020SJason J. Herne 		break;
397*72f25020SJason J. Herne 	default:
398*72f25020SJason J. Herne 		ret = -ENXIO;
399*72f25020SJason J. Herne 		break;
400*72f25020SJason J. Herne 	}
401*72f25020SJason J. Herne 	return ret;
402*72f25020SJason J. Herne }
403*72f25020SJason J. Herne 
404*72f25020SJason J. Herne static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
405*72f25020SJason J. Herne {
406*72f25020SJason J. Herne 	u8 gtod_high = 0;
407*72f25020SJason J. Herne 
408*72f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod_high,
409*72f25020SJason J. Herne 					 sizeof(gtod_high)))
410*72f25020SJason J. Herne 		return -EFAULT;
411*72f25020SJason J. Herne 
412*72f25020SJason J. Herne 	return 0;
413*72f25020SJason J. Herne }
414*72f25020SJason J. Herne 
415*72f25020SJason J. Herne static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
416*72f25020SJason J. Herne {
417*72f25020SJason J. Herne 	u64 host_tod, gtod;
418*72f25020SJason J. Herne 	int r;
419*72f25020SJason J. Herne 
420*72f25020SJason J. Herne 	r = store_tod_clock(&host_tod);
421*72f25020SJason J. Herne 	if (r)
422*72f25020SJason J. Herne 		return r;
423*72f25020SJason J. Herne 
424*72f25020SJason J. Herne 	gtod = host_tod + kvm->arch.epoch;
425*72f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
426*72f25020SJason J. Herne 		return -EFAULT;
427*72f25020SJason J. Herne 
428*72f25020SJason J. Herne 	return 0;
429*72f25020SJason J. Herne }
430*72f25020SJason J. Herne 
431*72f25020SJason J. Herne static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
432*72f25020SJason J. Herne {
433*72f25020SJason J. Herne 	int ret;
434*72f25020SJason J. Herne 
435*72f25020SJason J. Herne 	if (attr->flags)
436*72f25020SJason J. Herne 		return -EINVAL;
437*72f25020SJason J. Herne 
438*72f25020SJason J. Herne 	switch (attr->attr) {
439*72f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
440*72f25020SJason J. Herne 		ret = kvm_s390_get_tod_high(kvm, attr);
441*72f25020SJason J. Herne 		break;
442*72f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
443*72f25020SJason J. Herne 		ret = kvm_s390_get_tod_low(kvm, attr);
444*72f25020SJason J. Herne 		break;
445*72f25020SJason J. Herne 	default:
446*72f25020SJason J. Herne 		ret = -ENXIO;
447*72f25020SJason J. Herne 		break;
448*72f25020SJason J. Herne 	}
449*72f25020SJason J. Herne 	return ret;
450*72f25020SJason J. Herne }
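
/*
 * Usage sketch for the new TOD attributes: the guest epoch is programmed via
 * KVM_SET_DEVICE_ATTR with group KVM_S390_VM_TOD. Illustrative only; assumes
 * uapi headers from this (or a later) revision that define the
 * KVM_S390_VM_TOD_* constants, and `gtod` is a caller-chosen 64-bit TOD value.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_guest_tod_low(int vm_fd, uint64_t gtod)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_TOD,
		.attr  = KVM_S390_VM_TOD_LOW,	/* the high epoch byte must be 0, see above */
		.addr  = (uint64_t)(uintptr_t)&gtod,
	};

	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}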
451*72f25020SJason J. Herne 
452f2061656SDominik Dingel static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
453f2061656SDominik Dingel {
454f2061656SDominik Dingel 	int ret;
455f2061656SDominik Dingel 
456f2061656SDominik Dingel 	switch (attr->group) {
4574f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
4588c0a7ce6SDominik Dingel 		ret = kvm_s390_set_mem_control(kvm, attr);
4594f718eabSDominik Dingel 		break;
460*72f25020SJason J. Herne 	case KVM_S390_VM_TOD:
461*72f25020SJason J. Herne 		ret = kvm_s390_set_tod(kvm, attr);
462*72f25020SJason J. Herne 		break;
463f2061656SDominik Dingel 	default:
464f2061656SDominik Dingel 		ret = -ENXIO;
465f2061656SDominik Dingel 		break;
466f2061656SDominik Dingel 	}
467f2061656SDominik Dingel 
468f2061656SDominik Dingel 	return ret;
469f2061656SDominik Dingel }
470f2061656SDominik Dingel 
471f2061656SDominik Dingel static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
472f2061656SDominik Dingel {
4738c0a7ce6SDominik Dingel 	int ret;
4748c0a7ce6SDominik Dingel 
4758c0a7ce6SDominik Dingel 	switch (attr->group) {
4768c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
4778c0a7ce6SDominik Dingel 		ret = kvm_s390_get_mem_control(kvm, attr);
4788c0a7ce6SDominik Dingel 		break;
479*72f25020SJason J. Herne 	case KVM_S390_VM_TOD:
480*72f25020SJason J. Herne 		ret = kvm_s390_get_tod(kvm, attr);
481*72f25020SJason J. Herne 		break;
4828c0a7ce6SDominik Dingel 	default:
4838c0a7ce6SDominik Dingel 		ret = -ENXIO;
4848c0a7ce6SDominik Dingel 		break;
4858c0a7ce6SDominik Dingel 	}
4868c0a7ce6SDominik Dingel 
4878c0a7ce6SDominik Dingel 	return ret;
488f2061656SDominik Dingel }
489f2061656SDominik Dingel 
490f2061656SDominik Dingel static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
491f2061656SDominik Dingel {
492f2061656SDominik Dingel 	int ret;
493f2061656SDominik Dingel 
494f2061656SDominik Dingel 	switch (attr->group) {
4954f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
4964f718eabSDominik Dingel 		switch (attr->attr) {
4974f718eabSDominik Dingel 		case KVM_S390_VM_MEM_ENABLE_CMMA:
4984f718eabSDominik Dingel 		case KVM_S390_VM_MEM_CLR_CMMA:
4998c0a7ce6SDominik Dingel 		case KVM_S390_VM_MEM_LIMIT_SIZE:
5004f718eabSDominik Dingel 			ret = 0;
5014f718eabSDominik Dingel 			break;
5024f718eabSDominik Dingel 		default:
5034f718eabSDominik Dingel 			ret = -ENXIO;
5044f718eabSDominik Dingel 			break;
5054f718eabSDominik Dingel 		}
5064f718eabSDominik Dingel 		break;
507*72f25020SJason J. Herne 	case KVM_S390_VM_TOD:
508*72f25020SJason J. Herne 		switch (attr->attr) {
509*72f25020SJason J. Herne 		case KVM_S390_VM_TOD_LOW:
510*72f25020SJason J. Herne 		case KVM_S390_VM_TOD_HIGH:
511*72f25020SJason J. Herne 			ret = 0;
512*72f25020SJason J. Herne 			break;
513*72f25020SJason J. Herne 		default:
514*72f25020SJason J. Herne 			ret = -ENXIO;
515*72f25020SJason J. Herne 			break;
516*72f25020SJason J. Herne 		}
517*72f25020SJason J. Herne 		break;
518f2061656SDominik Dingel 	default:
519f2061656SDominik Dingel 		ret = -ENXIO;
520f2061656SDominik Dingel 		break;
521f2061656SDominik Dingel 	}
522f2061656SDominik Dingel 
523f2061656SDominik Dingel 	return ret;
524f2061656SDominik Dingel }
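
/*
 * Usage sketch: before relying on a group/attribute pair, userspace can probe
 * it with KVM_HAS_DEVICE_ATTR, which lands in kvm_s390_vm_has_attr() above and
 * returns 0 when supported. Illustrative helper; assumes a vm_fd.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int vm_attr_supported(int vm_fd, uint32_t group, uint64_t attr_id)
{
	struct kvm_device_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.group = group;
	attr.attr = attr_id;
	return ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0;
}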
525f2061656SDominik Dingel 
526b0c632dbSHeiko Carstens long kvm_arch_vm_ioctl(struct file *filp,
527b0c632dbSHeiko Carstens 		       unsigned int ioctl, unsigned long arg)
528b0c632dbSHeiko Carstens {
529b0c632dbSHeiko Carstens 	struct kvm *kvm = filp->private_data;
530b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
531f2061656SDominik Dingel 	struct kvm_device_attr attr;
532b0c632dbSHeiko Carstens 	int r;
533b0c632dbSHeiko Carstens 
534b0c632dbSHeiko Carstens 	switch (ioctl) {
535ba5c1e9bSCarsten Otte 	case KVM_S390_INTERRUPT: {
536ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
537ba5c1e9bSCarsten Otte 
538ba5c1e9bSCarsten Otte 		r = -EFAULT;
539ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
540ba5c1e9bSCarsten Otte 			break;
541ba5c1e9bSCarsten Otte 		r = kvm_s390_inject_vm(kvm, &s390int);
542ba5c1e9bSCarsten Otte 		break;
543ba5c1e9bSCarsten Otte 	}
544d938dc55SCornelia Huck 	case KVM_ENABLE_CAP: {
545d938dc55SCornelia Huck 		struct kvm_enable_cap cap;
546d938dc55SCornelia Huck 		r = -EFAULT;
547d938dc55SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
548d938dc55SCornelia Huck 			break;
549d938dc55SCornelia Huck 		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
550d938dc55SCornelia Huck 		break;
551d938dc55SCornelia Huck 	}
55284223598SCornelia Huck 	case KVM_CREATE_IRQCHIP: {
55384223598SCornelia Huck 		struct kvm_irq_routing_entry routing;
55484223598SCornelia Huck 
55584223598SCornelia Huck 		r = -EINVAL;
55684223598SCornelia Huck 		if (kvm->arch.use_irqchip) {
55784223598SCornelia Huck 			/* Set up dummy routing. */
55884223598SCornelia Huck 			memset(&routing, 0, sizeof(routing));
55984223598SCornelia Huck 			kvm_set_irq_routing(kvm, &routing, 0, 0);
56084223598SCornelia Huck 			r = 0;
56184223598SCornelia Huck 		}
56284223598SCornelia Huck 		break;
56384223598SCornelia Huck 	}
564f2061656SDominik Dingel 	case KVM_SET_DEVICE_ATTR: {
565f2061656SDominik Dingel 		r = -EFAULT;
566f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
567f2061656SDominik Dingel 			break;
568f2061656SDominik Dingel 		r = kvm_s390_vm_set_attr(kvm, &attr);
569f2061656SDominik Dingel 		break;
570f2061656SDominik Dingel 	}
571f2061656SDominik Dingel 	case KVM_GET_DEVICE_ATTR: {
572f2061656SDominik Dingel 		r = -EFAULT;
573f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
574f2061656SDominik Dingel 			break;
575f2061656SDominik Dingel 		r = kvm_s390_vm_get_attr(kvm, &attr);
576f2061656SDominik Dingel 		break;
577f2061656SDominik Dingel 	}
578f2061656SDominik Dingel 	case KVM_HAS_DEVICE_ATTR: {
579f2061656SDominik Dingel 		r = -EFAULT;
580f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
581f2061656SDominik Dingel 			break;
582f2061656SDominik Dingel 		r = kvm_s390_vm_has_attr(kvm, &attr);
583f2061656SDominik Dingel 		break;
584f2061656SDominik Dingel 	}
585b0c632dbSHeiko Carstens 	default:
586367e1319SAvi Kivity 		r = -ENOTTY;
587b0c632dbSHeiko Carstens 	}
588b0c632dbSHeiko Carstens 
589b0c632dbSHeiko Carstens 	return r;
590b0c632dbSHeiko Carstens }
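
/*
 * Usage sketch: floating interrupts are injected through the KVM_S390_INTERRUPT
 * vm ioctl handled above. Illustrative example delivering a service-signal
 * interrupt; the parameter value is a placeholder chosen by the caller.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int inject_service_signal(int vm_fd, uint32_t ext_param)
{
	struct kvm_s390_interrupt s390int;

	memset(&s390int, 0, sizeof(s390int));
	s390int.type = KVM_S390_INT_SERVICE;
	s390int.parm = ext_param;	/* external interrupt parameter */
	return ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int);
}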
591b0c632dbSHeiko Carstens 
5925102ee87STony Krowiak static int kvm_s390_crypto_init(struct kvm *kvm)
5935102ee87STony Krowiak {
5945102ee87STony Krowiak 	if (!test_vfacility(76))
5955102ee87STony Krowiak 		return 0;
5965102ee87STony Krowiak 
5975102ee87STony Krowiak 	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
5985102ee87STony Krowiak 					 GFP_KERNEL | GFP_DMA);
5995102ee87STony Krowiak 	if (!kvm->arch.crypto.crycb)
6005102ee87STony Krowiak 		return -ENOMEM;
6015102ee87STony Krowiak 
6025102ee87STony Krowiak 	kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
6035102ee87STony Krowiak 				  CRYCB_FORMAT1;
6045102ee87STony Krowiak 
6055102ee87STony Krowiak 	return 0;
6065102ee87STony Krowiak }
6075102ee87STony Krowiak 
608e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
609b0c632dbSHeiko Carstens {
610b0c632dbSHeiko Carstens 	int rc;
611b0c632dbSHeiko Carstens 	char debug_name[16];
612f6c137ffSChristian Borntraeger 	static unsigned long sca_offset;
613b0c632dbSHeiko Carstens 
614e08b9637SCarsten Otte 	rc = -EINVAL;
615e08b9637SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
616e08b9637SCarsten Otte 	if (type & ~KVM_VM_S390_UCONTROL)
617e08b9637SCarsten Otte 		goto out_err;
618e08b9637SCarsten Otte 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
619e08b9637SCarsten Otte 		goto out_err;
620e08b9637SCarsten Otte #else
621e08b9637SCarsten Otte 	if (type)
622e08b9637SCarsten Otte 		goto out_err;
623e08b9637SCarsten Otte #endif
624e08b9637SCarsten Otte 
625b0c632dbSHeiko Carstens 	rc = s390_enable_sie();
626b0c632dbSHeiko Carstens 	if (rc)
627d89f5effSJan Kiszka 		goto out_err;
628b0c632dbSHeiko Carstens 
629b290411aSCarsten Otte 	rc = -ENOMEM;
630b290411aSCarsten Otte 
631b0c632dbSHeiko Carstens 	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
632b0c632dbSHeiko Carstens 	if (!kvm->arch.sca)
633d89f5effSJan Kiszka 		goto out_err;
634f6c137ffSChristian Borntraeger 	spin_lock(&kvm_lock);
635f6c137ffSChristian Borntraeger 	sca_offset = (sca_offset + 16) & 0x7f0;
636f6c137ffSChristian Borntraeger 	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
637f6c137ffSChristian Borntraeger 	spin_unlock(&kvm_lock);
638b0c632dbSHeiko Carstens 
639b0c632dbSHeiko Carstens 	sprintf(debug_name, "kvm-%u", current->pid);
640b0c632dbSHeiko Carstens 
641b0c632dbSHeiko Carstens 	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
642b0c632dbSHeiko Carstens 	if (!kvm->arch.dbf)
643b0c632dbSHeiko Carstens 		goto out_nodbf;
644b0c632dbSHeiko Carstens 
6455102ee87STony Krowiak 	if (kvm_s390_crypto_init(kvm) < 0)
6465102ee87STony Krowiak 		goto out_crypto;
6475102ee87STony Krowiak 
648ba5c1e9bSCarsten Otte 	spin_lock_init(&kvm->arch.float_int.lock);
649ba5c1e9bSCarsten Otte 	INIT_LIST_HEAD(&kvm->arch.float_int.list);
6508a242234SHeiko Carstens 	init_waitqueue_head(&kvm->arch.ipte_wq);
651a6b7e459SThomas Huth 	mutex_init(&kvm->arch.ipte_mutex);
652ba5c1e9bSCarsten Otte 
653b0c632dbSHeiko Carstens 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
654b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "%s", "vm created");
655b0c632dbSHeiko Carstens 
656e08b9637SCarsten Otte 	if (type & KVM_VM_S390_UCONTROL) {
657e08b9637SCarsten Otte 		kvm->arch.gmap = NULL;
658e08b9637SCarsten Otte 	} else {
6590349985aSChristian Borntraeger 		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
660598841caSCarsten Otte 		if (!kvm->arch.gmap)
661598841caSCarsten Otte 			goto out_nogmap;
6622c70fe44SChristian Borntraeger 		kvm->arch.gmap->private = kvm;
66324eb3a82SDominik Dingel 		kvm->arch.gmap->pfault_enabled = 0;
664e08b9637SCarsten Otte 	}
665fa6b7fe9SCornelia Huck 
666fa6b7fe9SCornelia Huck 	kvm->arch.css_support = 0;
66784223598SCornelia Huck 	kvm->arch.use_irqchip = 0;
668*72f25020SJason J. Herne 	kvm->arch.epoch = 0;
669fa6b7fe9SCornelia Huck 
6708ad35755SDavid Hildenbrand 	spin_lock_init(&kvm->arch.start_stop_lock);
6718ad35755SDavid Hildenbrand 
672d89f5effSJan Kiszka 	return 0;
673598841caSCarsten Otte out_nogmap:
6745102ee87STony Krowiak 	kfree(kvm->arch.crypto.crycb);
6755102ee87STony Krowiak out_crypto:
676598841caSCarsten Otte 	debug_unregister(kvm->arch.dbf);
677b0c632dbSHeiko Carstens out_nodbf:
678b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
679d89f5effSJan Kiszka out_err:
680d89f5effSJan Kiszka 	return rc;
681b0c632dbSHeiko Carstens }
682b0c632dbSHeiko Carstens 
683d329c035SChristian Borntraeger void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
684d329c035SChristian Borntraeger {
685d329c035SChristian Borntraeger 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
686ade38c31SCornelia Huck 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
68767335e63SChristian Borntraeger 	kvm_s390_clear_local_irqs(vcpu);
6883c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
68958f9460bSCarsten Otte 	if (!kvm_is_ucontrol(vcpu->kvm)) {
69058f9460bSCarsten Otte 		clear_bit(63 - vcpu->vcpu_id,
69158f9460bSCarsten Otte 			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
692abf4a71eSCarsten Otte 		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
693abf4a71eSCarsten Otte 		    (__u64) vcpu->arch.sie_block)
694abf4a71eSCarsten Otte 			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
69558f9460bSCarsten Otte 	}
696abf4a71eSCarsten Otte 	smp_mb();
69727e0393fSCarsten Otte 
69827e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm))
69927e0393fSCarsten Otte 		gmap_free(vcpu->arch.gmap);
70027e0393fSCarsten Otte 
701b31605c1SDominik Dingel 	if (kvm_s390_cmma_enabled(vcpu->kvm))
702b31605c1SDominik Dingel 		kvm_s390_vcpu_unsetup_cmma(vcpu);
703d329c035SChristian Borntraeger 	free_page((unsigned long)(vcpu->arch.sie_block));
704b31288faSKonstantin Weitz 
7056692cef3SChristian Borntraeger 	kvm_vcpu_uninit(vcpu);
706b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
707d329c035SChristian Borntraeger }
708d329c035SChristian Borntraeger 
709d329c035SChristian Borntraeger static void kvm_free_vcpus(struct kvm *kvm)
710d329c035SChristian Borntraeger {
711d329c035SChristian Borntraeger 	unsigned int i;
712988a2caeSGleb Natapov 	struct kvm_vcpu *vcpu;
713d329c035SChristian Borntraeger 
714988a2caeSGleb Natapov 	kvm_for_each_vcpu(i, vcpu, kvm)
715988a2caeSGleb Natapov 		kvm_arch_vcpu_destroy(vcpu);
716988a2caeSGleb Natapov 
717988a2caeSGleb Natapov 	mutex_lock(&kvm->lock);
718988a2caeSGleb Natapov 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
719d329c035SChristian Borntraeger 		kvm->vcpus[i] = NULL;
720988a2caeSGleb Natapov 
721988a2caeSGleb Natapov 	atomic_set(&kvm->online_vcpus, 0);
722988a2caeSGleb Natapov 	mutex_unlock(&kvm->lock);
723d329c035SChristian Borntraeger }
724d329c035SChristian Borntraeger 
725b0c632dbSHeiko Carstens void kvm_arch_destroy_vm(struct kvm *kvm)
726b0c632dbSHeiko Carstens {
727d329c035SChristian Borntraeger 	kvm_free_vcpus(kvm);
728b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
729d329c035SChristian Borntraeger 	debug_unregister(kvm->arch.dbf);
7305102ee87STony Krowiak 	kfree(kvm->arch.crypto.crycb);
73127e0393fSCarsten Otte 	if (!kvm_is_ucontrol(kvm))
732598841caSCarsten Otte 		gmap_free(kvm->arch.gmap);
733841b91c5SCornelia Huck 	kvm_s390_destroy_adapters(kvm);
73467335e63SChristian Borntraeger 	kvm_s390_clear_float_irqs(kvm);
735b0c632dbSHeiko Carstens }
736b0c632dbSHeiko Carstens 
737b0c632dbSHeiko Carstens /* Section: vcpu related */
738dafd032aSDominik Dingel static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
739b0c632dbSHeiko Carstens {
740c6c956b8SMartin Schwidefsky 	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
74127e0393fSCarsten Otte 	if (!vcpu->arch.gmap)
74227e0393fSCarsten Otte 		return -ENOMEM;
7432c70fe44SChristian Borntraeger 	vcpu->arch.gmap->private = vcpu->kvm;
744dafd032aSDominik Dingel 
74527e0393fSCarsten Otte 	return 0;
74627e0393fSCarsten Otte }
74727e0393fSCarsten Otte 
748dafd032aSDominik Dingel int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
749dafd032aSDominik Dingel {
750dafd032aSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
751dafd032aSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
75259674c1aSChristian Borntraeger 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
75359674c1aSChristian Borntraeger 				    KVM_SYNC_GPRS |
7549eed0735SChristian Borntraeger 				    KVM_SYNC_ACRS |
755b028ee3eSDavid Hildenbrand 				    KVM_SYNC_CRS |
756b028ee3eSDavid Hildenbrand 				    KVM_SYNC_ARCH0 |
757b028ee3eSDavid Hildenbrand 				    KVM_SYNC_PFAULT;
758dafd032aSDominik Dingel 
759dafd032aSDominik Dingel 	if (kvm_is_ucontrol(vcpu->kvm))
760dafd032aSDominik Dingel 		return __kvm_ucontrol_vcpu_init(vcpu);
761dafd032aSDominik Dingel 
762b0c632dbSHeiko Carstens 	return 0;
763b0c632dbSHeiko Carstens }
764b0c632dbSHeiko Carstens 
765b0c632dbSHeiko Carstens void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
766b0c632dbSHeiko Carstens {
7674725c860SMartin Schwidefsky 	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
7684725c860SMartin Schwidefsky 	save_fp_regs(vcpu->arch.host_fpregs.fprs);
769b0c632dbSHeiko Carstens 	save_access_regs(vcpu->arch.host_acrs);
7704725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
7714725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
77259674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
773480e5926SChristian Borntraeger 	gmap_enable(vcpu->arch.gmap);
7749e6dabefSCornelia Huck 	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
775b0c632dbSHeiko Carstens }
776b0c632dbSHeiko Carstens 
777b0c632dbSHeiko Carstens void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
778b0c632dbSHeiko Carstens {
7799e6dabefSCornelia Huck 	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
780480e5926SChristian Borntraeger 	gmap_disable(vcpu->arch.gmap);
7814725c860SMartin Schwidefsky 	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
7824725c860SMartin Schwidefsky 	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
78359674c1aSChristian Borntraeger 	save_access_regs(vcpu->run->s.regs.acrs);
7844725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
7854725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
786b0c632dbSHeiko Carstens 	restore_access_regs(vcpu->arch.host_acrs);
787b0c632dbSHeiko Carstens }
788b0c632dbSHeiko Carstens 
789b0c632dbSHeiko Carstens static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
790b0c632dbSHeiko Carstens {
791b0c632dbSHeiko Carstens 	/* this equals the initial CPU reset in the PoP (Principles of Operation), but we don't switch to ESA */
792b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.mask = 0UL;
793b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.addr = 0UL;
7948d26cf7bSChristian Borntraeger 	kvm_s390_set_prefix(vcpu, 0);
795b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->cputm     = 0UL;
796b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->ckc       = 0UL;
797b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->todpr     = 0;
798b0c632dbSHeiko Carstens 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
799b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
800b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
801b0c632dbSHeiko Carstens 	vcpu->arch.guest_fpregs.fpc = 0;
802b0c632dbSHeiko Carstens 	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
803b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gbea = 1;
804672550fbSChristian Borntraeger 	vcpu->arch.sie_block->pp = 0;
8053c038e6bSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
8063c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
8076352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
8086852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
8092ed10cc1SJens Freimann 	kvm_s390_clear_local_irqs(vcpu);
810b0c632dbSHeiko Carstens }
811b0c632dbSHeiko Carstens 
81231928aa5SDominik Dingel void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
81342897d86SMarcelo Tosatti {
814*72f25020SJason J. Herne 	mutex_lock(&vcpu->kvm->lock);
815*72f25020SJason J. Herne 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
816*72f25020SJason J. Herne 	mutex_unlock(&vcpu->kvm->lock);
817dafd032aSDominik Dingel 	if (!kvm_is_ucontrol(vcpu->kvm))
818dafd032aSDominik Dingel 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
81942897d86SMarcelo Tosatti }
82042897d86SMarcelo Tosatti 
8215102ee87STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
8225102ee87STony Krowiak {
8235102ee87STony Krowiak 	if (!test_vfacility(76))
8245102ee87STony Krowiak 		return;
8255102ee87STony Krowiak 
8265102ee87STony Krowiak 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
8275102ee87STony Krowiak }
8285102ee87STony Krowiak 
829b31605c1SDominik Dingel void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
830b31605c1SDominik Dingel {
831b31605c1SDominik Dingel 	free_page(vcpu->arch.sie_block->cbrlo);
832b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = 0;
833b31605c1SDominik Dingel }
834b31605c1SDominik Dingel 
835b31605c1SDominik Dingel int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
836b31605c1SDominik Dingel {
837b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
838b31605c1SDominik Dingel 	if (!vcpu->arch.sie_block->cbrlo)
839b31605c1SDominik Dingel 		return -ENOMEM;
840b31605c1SDominik Dingel 
841b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 |= 0x80;
842b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 &= ~0x08;
843b31605c1SDominik Dingel 	return 0;
844b31605c1SDominik Dingel }
845b31605c1SDominik Dingel 
846b0c632dbSHeiko Carstens int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
847b0c632dbSHeiko Carstens {
848b31605c1SDominik Dingel 	int rc = 0;
849b31288faSKonstantin Weitz 
8509e6dabefSCornelia Huck 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
8519e6dabefSCornelia Huck 						    CPUSTAT_SM |
85269d0d3a3SChristian Borntraeger 						    CPUSTAT_STOPPED |
85369d0d3a3SChristian Borntraeger 						    CPUSTAT_GED);
854fc34531dSChristian Borntraeger 	vcpu->arch.sie_block->ecb   = 6;
8557feb6bb8SMichael Mueller 	if (test_vfacility(50) && test_vfacility(73))
8567feb6bb8SMichael Mueller 		vcpu->arch.sie_block->ecb |= 0x10;
8577feb6bb8SMichael Mueller 
85869d0d3a3SChristian Borntraeger 	vcpu->arch.sie_block->ecb2  = 8;
859ea5f4969SDavid Hildenbrand 	vcpu->arch.sie_block->eca   = 0xC1002000U;
860217a4406SHeiko Carstens 	if (sclp_has_siif())
861217a4406SHeiko Carstens 		vcpu->arch.sie_block->eca |= 1;
862ea5f4969SDavid Hildenbrand 	if (sclp_has_sigpif())
863ea5f4969SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= 0x10000000U;
86478c4b59fSMichael Mueller 	vcpu->arch.sie_block->fac   = (int) (long) vfacilities;
8655a5e6536SMatthew Rosato 	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
8665a5e6536SMatthew Rosato 				      ICTL_TPROT;
8675a5e6536SMatthew Rosato 
868b31605c1SDominik Dingel 	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
869b31605c1SDominik Dingel 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
870b31605c1SDominik Dingel 		if (rc)
871b31605c1SDominik Dingel 			return rc;
872b31288faSKonstantin Weitz 	}
8730ac96cafSDavid Hildenbrand 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
874ca872302SChristian Borntraeger 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
875453423dcSChristian Borntraeger 	get_cpu_id(&vcpu->arch.cpu_id);
87692e6ecf3SChristian Borntraeger 	vcpu->arch.cpu_id.version = 0xff;
8775102ee87STony Krowiak 
8785102ee87STony Krowiak 	kvm_s390_vcpu_crypto_setup(vcpu);
8795102ee87STony Krowiak 
880b31605c1SDominik Dingel 	return rc;
881b0c632dbSHeiko Carstens }
882b0c632dbSHeiko Carstens 
883b0c632dbSHeiko Carstens struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
884b0c632dbSHeiko Carstens 				      unsigned int id)
885b0c632dbSHeiko Carstens {
8864d47555aSCarsten Otte 	struct kvm_vcpu *vcpu;
8877feb6bb8SMichael Mueller 	struct sie_page *sie_page;
8884d47555aSCarsten Otte 	int rc = -EINVAL;
889b0c632dbSHeiko Carstens 
8904d47555aSCarsten Otte 	if (id >= KVM_MAX_VCPUS)
8914d47555aSCarsten Otte 		goto out;
8924d47555aSCarsten Otte 
8934d47555aSCarsten Otte 	rc = -ENOMEM;
8944d47555aSCarsten Otte 
895b110feafSMichael Mueller 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
896b0c632dbSHeiko Carstens 	if (!vcpu)
8974d47555aSCarsten Otte 		goto out;
898b0c632dbSHeiko Carstens 
8997feb6bb8SMichael Mueller 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
9007feb6bb8SMichael Mueller 	if (!sie_page)
901b0c632dbSHeiko Carstens 		goto out_free_cpu;
902b0c632dbSHeiko Carstens 
9037feb6bb8SMichael Mueller 	vcpu->arch.sie_block = &sie_page->sie_block;
9047feb6bb8SMichael Mueller 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
9057feb6bb8SMichael Mueller 
906b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icpua = id;
90758f9460bSCarsten Otte 	if (!kvm_is_ucontrol(kvm)) {
90858f9460bSCarsten Otte 		if (!kvm->arch.sca) {
90958f9460bSCarsten Otte 			WARN_ON_ONCE(1);
91058f9460bSCarsten Otte 			goto out_free_cpu;
91158f9460bSCarsten Otte 		}
912abf4a71eSCarsten Otte 		if (!kvm->arch.sca->cpu[id].sda)
91358f9460bSCarsten Otte 			kvm->arch.sca->cpu[id].sda =
91458f9460bSCarsten Otte 				(__u64) vcpu->arch.sie_block;
91558f9460bSCarsten Otte 		vcpu->arch.sie_block->scaoh =
91658f9460bSCarsten Otte 			(__u32)(((__u64)kvm->arch.sca) >> 32);
917b0c632dbSHeiko Carstens 		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
918fc34531dSChristian Borntraeger 		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
91958f9460bSCarsten Otte 	}
920b0c632dbSHeiko Carstens 
921ba5c1e9bSCarsten Otte 	spin_lock_init(&vcpu->arch.local_int.lock);
922ba5c1e9bSCarsten Otte 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
923d0321a24SChristian Borntraeger 	vcpu->arch.local_int.wq = &vcpu->wq;
9245288fbf0SChristian Borntraeger 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
925ba5c1e9bSCarsten Otte 
926b0c632dbSHeiko Carstens 	rc = kvm_vcpu_init(vcpu, kvm, id);
927b0c632dbSHeiko Carstens 	if (rc)
9287b06bf2fSWei Yongjun 		goto out_free_sie_block;
929b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
930b0c632dbSHeiko Carstens 		 vcpu->arch.sie_block);
931ade38c31SCornelia Huck 	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
932b0c632dbSHeiko Carstens 
933b0c632dbSHeiko Carstens 	return vcpu;
9347b06bf2fSWei Yongjun out_free_sie_block:
9357b06bf2fSWei Yongjun 	free_page((unsigned long)(vcpu->arch.sie_block));
936b0c632dbSHeiko Carstens out_free_cpu:
937b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
9384d47555aSCarsten Otte out:
939b0c632dbSHeiko Carstens 	return ERR_PTR(rc);
940b0c632dbSHeiko Carstens }
941b0c632dbSHeiko Carstens 
942b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
943b0c632dbSHeiko Carstens {
9449a022067SDavid Hildenbrand 	return kvm_s390_vcpu_has_irq(vcpu, 0);
945b0c632dbSHeiko Carstens }
946b0c632dbSHeiko Carstens 
94749b99e1eSChristian Borntraeger void s390_vcpu_block(struct kvm_vcpu *vcpu)
94849b99e1eSChristian Borntraeger {
94949b99e1eSChristian Borntraeger 	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
95049b99e1eSChristian Borntraeger }
95149b99e1eSChristian Borntraeger 
95249b99e1eSChristian Borntraeger void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
95349b99e1eSChristian Borntraeger {
95449b99e1eSChristian Borntraeger 	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
95549b99e1eSChristian Borntraeger }
95649b99e1eSChristian Borntraeger 
95749b99e1eSChristian Borntraeger /*
95849b99e1eSChristian Borntraeger  * Kick a guest cpu out of SIE and wait until SIE is not running.
95949b99e1eSChristian Borntraeger  * If the CPU is not running (e.g. waiting as idle) the function will
96049b99e1eSChristian Borntraeger  * return immediately. */
96149b99e1eSChristian Borntraeger void exit_sie(struct kvm_vcpu *vcpu)
96249b99e1eSChristian Borntraeger {
96349b99e1eSChristian Borntraeger 	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
96449b99e1eSChristian Borntraeger 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
96549b99e1eSChristian Borntraeger 		cpu_relax();
96649b99e1eSChristian Borntraeger }
96749b99e1eSChristian Borntraeger 
96849b99e1eSChristian Borntraeger /* Kick a guest cpu out of SIE and prevent SIE-reentry */
96949b99e1eSChristian Borntraeger void exit_sie_sync(struct kvm_vcpu *vcpu)
97049b99e1eSChristian Borntraeger {
97149b99e1eSChristian Borntraeger 	s390_vcpu_block(vcpu);
97249b99e1eSChristian Borntraeger 	exit_sie(vcpu);
97349b99e1eSChristian Borntraeger }
97449b99e1eSChristian Borntraeger 
9752c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
9762c70fe44SChristian Borntraeger {
9772c70fe44SChristian Borntraeger 	int i;
9782c70fe44SChristian Borntraeger 	struct kvm *kvm = gmap->private;
9792c70fe44SChristian Borntraeger 	struct kvm_vcpu *vcpu;
9802c70fe44SChristian Borntraeger 
9812c70fe44SChristian Borntraeger 	kvm_for_each_vcpu(i, vcpu, kvm) {
9822c70fe44SChristian Borntraeger 		/* match against both prefix pages */
983fda902cbSMichael Mueller 		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
9842c70fe44SChristian Borntraeger 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
9852c70fe44SChristian Borntraeger 			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
9862c70fe44SChristian Borntraeger 			exit_sie_sync(vcpu);
9872c70fe44SChristian Borntraeger 		}
9882c70fe44SChristian Borntraeger 	}
9892c70fe44SChristian Borntraeger }
9902c70fe44SChristian Borntraeger 
991b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
992b6d33834SChristoffer Dall {
993b6d33834SChristoffer Dall 	/* kvm common code refers to this, but never calls it */
994b6d33834SChristoffer Dall 	BUG();
995b6d33834SChristoffer Dall 	return 0;
996b6d33834SChristoffer Dall }
997b6d33834SChristoffer Dall 
99814eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
99914eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
100014eebd91SCarsten Otte {
100114eebd91SCarsten Otte 	int r = -EINVAL;
100214eebd91SCarsten Otte 
100314eebd91SCarsten Otte 	switch (reg->id) {
100429b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
100529b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->todpr,
100629b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
100729b7c71bSCarsten Otte 		break;
100829b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
100929b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->epoch,
101029b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
101129b7c71bSCarsten Otte 		break;
101246a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
101346a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->cputm,
101446a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
101546a6dd1cSJason J. herne 		break;
101646a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
101746a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->ckc,
101846a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
101946a6dd1cSJason J. herne 		break;
1020536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
1021536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_token,
1022536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1023536336c2SDominik Dingel 		break;
1024536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
1025536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_compare,
1026536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1027536336c2SDominik Dingel 		break;
1028536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
1029536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_select,
1030536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1031536336c2SDominik Dingel 		break;
1032672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
1033672550fbSChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->pp,
1034672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
1035672550fbSChristian Borntraeger 		break;
1036afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
1037afa45ff5SChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->gbea,
1038afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
1039afa45ff5SChristian Borntraeger 		break;
104014eebd91SCarsten Otte 	default:
104114eebd91SCarsten Otte 		break;
104214eebd91SCarsten Otte 	}
104314eebd91SCarsten Otte 
104414eebd91SCarsten Otte 	return r;
104514eebd91SCarsten Otte }
104614eebd91SCarsten Otte 
104714eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
104814eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
104914eebd91SCarsten Otte {
105014eebd91SCarsten Otte 	int r = -EINVAL;
105114eebd91SCarsten Otte 
105214eebd91SCarsten Otte 	switch (reg->id) {
105329b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
105429b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->todpr,
105529b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
105629b7c71bSCarsten Otte 		break;
105729b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
105829b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->epoch,
105929b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
106029b7c71bSCarsten Otte 		break;
106146a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
106246a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->cputm,
106346a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
106446a6dd1cSJason J. herne 		break;
106546a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
106646a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->ckc,
106746a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
106846a6dd1cSJason J. herne 		break;
1069536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
1070536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_token,
1071536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
10729fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
10739fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
1074536336c2SDominik Dingel 		break;
1075536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
1076536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_compare,
1077536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1078536336c2SDominik Dingel 		break;
1079536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
1080536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_select,
1081536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1082536336c2SDominik Dingel 		break;
1083672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
1084672550fbSChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->pp,
1085672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
1086672550fbSChristian Borntraeger 		break;
1087afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
1088afa45ff5SChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->gbea,
1089afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
1090afa45ff5SChristian Borntraeger 		break;
109114eebd91SCarsten Otte 	default:
109214eebd91SCarsten Otte 		break;
109314eebd91SCarsten Otte 	}
109414eebd91SCarsten Otte 
109514eebd91SCarsten Otte 	return r;
109614eebd91SCarsten Otte }
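
/*
 * Illustrative userspace sketch for the one_reg handlers above, assuming an
 * open vcpu file descriptor (vcpu_fd): the registers listed in the switch
 * statements are read and written through the generic KVM_GET_ONE_REG and
 * KVM_SET_ONE_REG ioctls, with reg->addr pointing at a caller-provided buffer.
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&val,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
 *		perror("KVM_GET_ONE_REG");
 *	if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg))
 *		perror("KVM_SET_ONE_REG");
 */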
1097b6d33834SChristoffer Dall 
1098b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
1099b0c632dbSHeiko Carstens {
1100b0c632dbSHeiko Carstens 	kvm_s390_vcpu_initial_reset(vcpu);
1101b0c632dbSHeiko Carstens 	return 0;
1102b0c632dbSHeiko Carstens }
1103b0c632dbSHeiko Carstens 
1104b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1105b0c632dbSHeiko Carstens {
11065a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
1107b0c632dbSHeiko Carstens 	return 0;
1108b0c632dbSHeiko Carstens }
1109b0c632dbSHeiko Carstens 
1110b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1111b0c632dbSHeiko Carstens {
11125a32c1afSChristian Borntraeger 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
1113b0c632dbSHeiko Carstens 	return 0;
1114b0c632dbSHeiko Carstens }
1115b0c632dbSHeiko Carstens 
1116b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1117b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
1118b0c632dbSHeiko Carstens {
111959674c1aSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
1120b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
112159674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
1122b0c632dbSHeiko Carstens 	return 0;
1123b0c632dbSHeiko Carstens }
1124b0c632dbSHeiko Carstens 
1125b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1126b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
1127b0c632dbSHeiko Carstens {
112859674c1aSChristian Borntraeger 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
1129b0c632dbSHeiko Carstens 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
1130b0c632dbSHeiko Carstens 	return 0;
1131b0c632dbSHeiko Carstens }
1132b0c632dbSHeiko Carstens 
1133b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1134b0c632dbSHeiko Carstens {
11354725c860SMartin Schwidefsky 	if (test_fp_ctl(fpu->fpc))
11364725c860SMartin Schwidefsky 		return -EINVAL;
1137b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
11384725c860SMartin Schwidefsky 	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
11394725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
11404725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
1141b0c632dbSHeiko Carstens 	return 0;
1142b0c632dbSHeiko Carstens }
1143b0c632dbSHeiko Carstens 
1144b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1145b0c632dbSHeiko Carstens {
1146b0c632dbSHeiko Carstens 	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
1147b0c632dbSHeiko Carstens 	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
1148b0c632dbSHeiko Carstens 	return 0;
1149b0c632dbSHeiko Carstens }
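
/*
 * Illustrative userspace sketch for the FPU handlers above, assuming an open
 * vcpu file descriptor (vcpu_fd): guest floating point state is saved and
 * restored through the generic KVM_GET_FPU/KVM_SET_FPU ioctls; a set with an
 * invalid fpc is rejected with -EINVAL by the test_fp_ctl() check above.
 *
 *	struct kvm_fpu fpu;
 *
 *	if (ioctl(vcpu_fd, KVM_GET_FPU, &fpu))
 *		perror("KVM_GET_FPU");
 *	if (ioctl(vcpu_fd, KVM_SET_FPU, &fpu))
 *		perror("KVM_SET_FPU");
 */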
1150b0c632dbSHeiko Carstens 
1151b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1152b0c632dbSHeiko Carstens {
1153b0c632dbSHeiko Carstens 	int rc = 0;
1154b0c632dbSHeiko Carstens 
11557a42fdc2SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
1156b0c632dbSHeiko Carstens 		rc = -EBUSY;
1157d7b0b5ebSCarsten Otte 	else {
1158d7b0b5ebSCarsten Otte 		vcpu->run->psw_mask = psw.mask;
1159d7b0b5ebSCarsten Otte 		vcpu->run->psw_addr = psw.addr;
1160d7b0b5ebSCarsten Otte 	}
1161b0c632dbSHeiko Carstens 	return rc;
1162b0c632dbSHeiko Carstens }
1163b0c632dbSHeiko Carstens 
1164b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1165b0c632dbSHeiko Carstens 				  struct kvm_translation *tr)
1166b0c632dbSHeiko Carstens {
1167b0c632dbSHeiko Carstens 	return -EINVAL; /* not implemented yet */
1168b0c632dbSHeiko Carstens }
1169b0c632dbSHeiko Carstens 
117027291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
117127291e21SDavid Hildenbrand 			      KVM_GUESTDBG_USE_HW_BP | \
117227291e21SDavid Hildenbrand 			      KVM_GUESTDBG_ENABLE)
117327291e21SDavid Hildenbrand 
1174d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1175d0bfb940SJan Kiszka 					struct kvm_guest_debug *dbg)
1176b0c632dbSHeiko Carstens {
117727291e21SDavid Hildenbrand 	int rc = 0;
117827291e21SDavid Hildenbrand 
117927291e21SDavid Hildenbrand 	vcpu->guest_debug = 0;
118027291e21SDavid Hildenbrand 	kvm_s390_clear_bp_data(vcpu);
118127291e21SDavid Hildenbrand 
11822de3bfc2SDavid Hildenbrand 	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
118327291e21SDavid Hildenbrand 		return -EINVAL;
118427291e21SDavid Hildenbrand 
118527291e21SDavid Hildenbrand 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
118627291e21SDavid Hildenbrand 		vcpu->guest_debug = dbg->control;
118727291e21SDavid Hildenbrand 		/* enforce guest PER */
118827291e21SDavid Hildenbrand 		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
118927291e21SDavid Hildenbrand 
119027291e21SDavid Hildenbrand 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
119127291e21SDavid Hildenbrand 			rc = kvm_s390_import_bp_data(vcpu, dbg);
119227291e21SDavid Hildenbrand 	} else {
119327291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
119427291e21SDavid Hildenbrand 		vcpu->arch.guestdbg.last_bp = 0;
119527291e21SDavid Hildenbrand 	}
119627291e21SDavid Hildenbrand 
119727291e21SDavid Hildenbrand 	if (rc) {
119827291e21SDavid Hildenbrand 		vcpu->guest_debug = 0;
119927291e21SDavid Hildenbrand 		kvm_s390_clear_bp_data(vcpu);
120027291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
120127291e21SDavid Hildenbrand 	}
120227291e21SDavid Hildenbrand 
120327291e21SDavid Hildenbrand 	return rc;
1204b0c632dbSHeiko Carstens }
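
/*
 * Illustrative userspace sketch for kvm_arch_vcpu_ioctl_set_guest_debug()
 * above, assuming an open vcpu file descriptor (vcpu_fd): single-stepping is
 * requested by setting KVM_GUESTDBG_ENABLE together with
 * KVM_GUESTDBG_SINGLESTEP; any flag outside VALID_GUESTDBG_FLAGS is rejected
 * with -EINVAL.
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg))
 *		perror("KVM_SET_GUEST_DEBUG");
 */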
1205b0c632dbSHeiko Carstens 
120662d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
120762d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
120862d9f0dbSMarcelo Tosatti {
12096352e4d2SDavid Hildenbrand 	/* CHECK_STOP and LOAD are not supported yet */
12106352e4d2SDavid Hildenbrand 	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
12116352e4d2SDavid Hildenbrand 				       KVM_MP_STATE_OPERATING;
121262d9f0dbSMarcelo Tosatti }
121362d9f0dbSMarcelo Tosatti 
121462d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
121562d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
121662d9f0dbSMarcelo Tosatti {
12176352e4d2SDavid Hildenbrand 	int rc = 0;
12186352e4d2SDavid Hildenbrand 
12196352e4d2SDavid Hildenbrand 	/* user space knows about this interface - let it control the state */
12206352e4d2SDavid Hildenbrand 	vcpu->kvm->arch.user_cpu_state_ctrl = 1;
12216352e4d2SDavid Hildenbrand 
12226352e4d2SDavid Hildenbrand 	switch (mp_state->mp_state) {
12236352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_STOPPED:
12246352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
12256352e4d2SDavid Hildenbrand 		break;
12266352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_OPERATING:
12276352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
12286352e4d2SDavid Hildenbrand 		break;
12296352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_LOAD:
12306352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_CHECK_STOP:
12316352e4d2SDavid Hildenbrand 		/* fall through - CHECK_STOP and LOAD are not supported yet */
12326352e4d2SDavid Hildenbrand 	default:
12336352e4d2SDavid Hildenbrand 		rc = -ENXIO;
12346352e4d2SDavid Hildenbrand 	}
12356352e4d2SDavid Hildenbrand 
12366352e4d2SDavid Hildenbrand 	return rc;
123762d9f0dbSMarcelo Tosatti }
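
/*
 * Illustrative userspace sketch for the MP state handlers above, assuming an
 * open vcpu file descriptor (vcpu_fd).  Note that the first KVM_SET_MP_STATE
 * call also switches the VM to user controlled CPU state handling.
 *
 *	struct kvm_mp_state mp_state = { .mp_state = KVM_MP_STATE_STOPPED };
 *
 *	if (ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp_state))
 *		perror("KVM_SET_MP_STATE");
 *	if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp_state))
 *		perror("KVM_GET_MP_STATE");
 */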
123862d9f0dbSMarcelo Tosatti 
1239b31605c1SDominik Dingel bool kvm_s390_cmma_enabled(struct kvm *kvm)
1240b31605c1SDominik Dingel {
1241b31605c1SDominik Dingel 	if (!MACHINE_IS_LPAR)
1242b31605c1SDominik Dingel 		return false;
1243b31605c1SDominik Dingel 	/* only enable for z10 and later */
1244b31605c1SDominik Dingel 	if (!MACHINE_HAS_EDAT1)
1245b31605c1SDominik Dingel 		return false;
1246b31605c1SDominik Dingel 	if (!kvm->arch.use_cmma)
1247b31605c1SDominik Dingel 		return false;
1248b31605c1SDominik Dingel 	return true;
1249b31605c1SDominik Dingel }
1250b31605c1SDominik Dingel 
12518ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu)
12528ad35755SDavid Hildenbrand {
12538ad35755SDavid Hildenbrand 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
12548ad35755SDavid Hildenbrand }
12558ad35755SDavid Hildenbrand 
12562c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
12572c70fe44SChristian Borntraeger {
12588ad35755SDavid Hildenbrand retry:
12598ad35755SDavid Hildenbrand 	s390_vcpu_unblock(vcpu);
12602c70fe44SChristian Borntraeger 	/*
12612c70fe44SChristian Borntraeger 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
12622c70fe44SChristian Borntraeger 	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
12632c70fe44SChristian Borntraeger 	 * This ensures that the ipte instruction for this request has
12642c70fe44SChristian Borntraeger 	 * already finished. We might race against a second unmapper that
12652c70fe44SChristian Borntraeger 	 * wants to set the blocking bit. Let's just retry the request loop.
12662c70fe44SChristian Borntraeger 	 */
12678ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
12682c70fe44SChristian Borntraeger 		int rc;
12692c70fe44SChristian Borntraeger 		rc = gmap_ipte_notify(vcpu->arch.gmap,
1270fda902cbSMichael Mueller 				      kvm_s390_get_prefix(vcpu),
12712c70fe44SChristian Borntraeger 				      PAGE_SIZE * 2);
12722c70fe44SChristian Borntraeger 		if (rc)
12732c70fe44SChristian Borntraeger 			return rc;
12748ad35755SDavid Hildenbrand 		goto retry;
12752c70fe44SChristian Borntraeger 	}
12768ad35755SDavid Hildenbrand 
1277d3d692c8SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
1278d3d692c8SDavid Hildenbrand 		vcpu->arch.sie_block->ihcpu = 0xffff;
1279d3d692c8SDavid Hildenbrand 		goto retry;
1280d3d692c8SDavid Hildenbrand 	}
1281d3d692c8SDavid Hildenbrand 
12828ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
12838ad35755SDavid Hildenbrand 		if (!ibs_enabled(vcpu)) {
12848ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
12858ad35755SDavid Hildenbrand 			atomic_set_mask(CPUSTAT_IBS,
12868ad35755SDavid Hildenbrand 					&vcpu->arch.sie_block->cpuflags);
12878ad35755SDavid Hildenbrand 		}
12888ad35755SDavid Hildenbrand 		goto retry;
12898ad35755SDavid Hildenbrand 	}
12908ad35755SDavid Hildenbrand 
12918ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
12928ad35755SDavid Hildenbrand 		if (ibs_enabled(vcpu)) {
12938ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
12948ad35755SDavid Hildenbrand 			atomic_clear_mask(CPUSTAT_IBS,
12958ad35755SDavid Hildenbrand 					  &vcpu->arch.sie_block->cpuflags);
12968ad35755SDavid Hildenbrand 		}
12978ad35755SDavid Hildenbrand 		goto retry;
12988ad35755SDavid Hildenbrand 	}
12998ad35755SDavid Hildenbrand 
13000759d068SDavid Hildenbrand 	/* nothing to do, just clear the request */
13010759d068SDavid Hildenbrand 	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
13020759d068SDavid Hildenbrand 
13032c70fe44SChristian Borntraeger 	return 0;
13042c70fe44SChristian Borntraeger }
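
/*
 * Sketch of the producer side of the request handling above (see
 * __enable_ibs_on_vcpu()/__disable_ibs_on_vcpu() further down): a request is
 * queued and the vcpu is kicked out of SIE so that kvm_s390_handle_requests()
 * processes it before the next guest entry.
 *
 *	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
 *	exit_sie_sync(vcpu);
 */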
13052c70fe44SChristian Borntraeger 
1306fa576c58SThomas Huth /**
1307fa576c58SThomas Huth  * kvm_arch_fault_in_page - fault-in guest page if necessary
1308fa576c58SThomas Huth  * @vcpu: The corresponding virtual cpu
1309fa576c58SThomas Huth  * @gpa: Guest physical address
1310fa576c58SThomas Huth  * @writable: Whether the page should be writable or not
1311fa576c58SThomas Huth  *
1312fa576c58SThomas Huth  * Make sure that a guest page has been faulted-in on the host.
1313fa576c58SThomas Huth  *
1314fa576c58SThomas Huth  * Return: Zero on success, negative error code otherwise.
1315fa576c58SThomas Huth  */
1316fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
131724eb3a82SDominik Dingel {
1318527e30b4SMartin Schwidefsky 	return gmap_fault(vcpu->arch.gmap, gpa,
1319527e30b4SMartin Schwidefsky 			  writable ? FAULT_FLAG_WRITE : 0);
132024eb3a82SDominik Dingel }
132124eb3a82SDominik Dingel 
13223c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
13233c038e6bSDominik Dingel 				      unsigned long token)
13243c038e6bSDominik Dingel {
13253c038e6bSDominik Dingel 	struct kvm_s390_interrupt inti;
1326383d0b05SJens Freimann 	struct kvm_s390_irq irq;
13273c038e6bSDominik Dingel 
13283c038e6bSDominik Dingel 	if (start_token) {
1329383d0b05SJens Freimann 		irq.u.ext.ext_params2 = token;
1330383d0b05SJens Freimann 		irq.type = KVM_S390_INT_PFAULT_INIT;
1331383d0b05SJens Freimann 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
13323c038e6bSDominik Dingel 	} else {
13333c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_DONE;
1334383d0b05SJens Freimann 		inti.parm64 = token;
13353c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
13363c038e6bSDominik Dingel 	}
13373c038e6bSDominik Dingel }
13383c038e6bSDominik Dingel 
13393c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
13403c038e6bSDominik Dingel 				     struct kvm_async_pf *work)
13413c038e6bSDominik Dingel {
13423c038e6bSDominik Dingel 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
13433c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
13443c038e6bSDominik Dingel }
13453c038e6bSDominik Dingel 
13463c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
13473c038e6bSDominik Dingel 				 struct kvm_async_pf *work)
13483c038e6bSDominik Dingel {
13493c038e6bSDominik Dingel 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
13503c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
13513c038e6bSDominik Dingel }
13523c038e6bSDominik Dingel 
13533c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
13543c038e6bSDominik Dingel 			       struct kvm_async_pf *work)
13553c038e6bSDominik Dingel {
13563c038e6bSDominik Dingel 	/* s390 will always inject the page directly */
13573c038e6bSDominik Dingel }
13583c038e6bSDominik Dingel 
13593c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
13603c038e6bSDominik Dingel {
13613c038e6bSDominik Dingel 	/*
13623c038e6bSDominik Dingel 	 * s390 will always inject the page directly,
13633c038e6bSDominik Dingel 	 * but we still want check_async_completion to clean up
13643c038e6bSDominik Dingel 	 */
13653c038e6bSDominik Dingel 	return true;
13663c038e6bSDominik Dingel }
13673c038e6bSDominik Dingel 
13683c038e6bSDominik Dingel static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
13693c038e6bSDominik Dingel {
13703c038e6bSDominik Dingel 	hva_t hva;
13713c038e6bSDominik Dingel 	struct kvm_arch_async_pf arch;
13723c038e6bSDominik Dingel 	int rc;
13733c038e6bSDominik Dingel 
13743c038e6bSDominik Dingel 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
13753c038e6bSDominik Dingel 		return 0;
13763c038e6bSDominik Dingel 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
13773c038e6bSDominik Dingel 	    vcpu->arch.pfault_compare)
13783c038e6bSDominik Dingel 		return 0;
13793c038e6bSDominik Dingel 	if (psw_extint_disabled(vcpu))
13803c038e6bSDominik Dingel 		return 0;
13819a022067SDavid Hildenbrand 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
13823c038e6bSDominik Dingel 		return 0;
13833c038e6bSDominik Dingel 	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
13843c038e6bSDominik Dingel 		return 0;
13853c038e6bSDominik Dingel 	if (!vcpu->arch.gmap->pfault_enabled)
13863c038e6bSDominik Dingel 		return 0;
13873c038e6bSDominik Dingel 
138881480cc1SHeiko Carstens 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
138981480cc1SHeiko Carstens 	hva += current->thread.gmap_addr & ~PAGE_MASK;
139081480cc1SHeiko Carstens 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
13913c038e6bSDominik Dingel 		return 0;
13923c038e6bSDominik Dingel 
13933c038e6bSDominik Dingel 	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
13943c038e6bSDominik Dingel 	return rc;
13953c038e6bSDominik Dingel }
13963c038e6bSDominik Dingel 
13973fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu)
1398b0c632dbSHeiko Carstens {
13993fb4c40fSThomas Huth 	int rc, cpuflags;
1400e168bf8dSCarsten Otte 
14013c038e6bSDominik Dingel 	/*
14023c038e6bSDominik Dingel 	 * On s390 notifications for arriving pages will be delivered directly
14033c038e6bSDominik Dingel 	 * to the guest, but the housekeeping for completed pfaults is
14043c038e6bSDominik Dingel 	 * handled outside the worker.
14053c038e6bSDominik Dingel 	 */
14063c038e6bSDominik Dingel 	kvm_check_async_pf_completion(vcpu);
14073c038e6bSDominik Dingel 
14085a32c1afSChristian Borntraeger 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
1409b0c632dbSHeiko Carstens 
1410b0c632dbSHeiko Carstens 	if (need_resched())
1411b0c632dbSHeiko Carstens 		schedule();
1412b0c632dbSHeiko Carstens 
1413d3a73acbSMartin Schwidefsky 	if (test_cpu_flag(CIF_MCCK_PENDING))
141471cde587SChristian Borntraeger 		s390_handle_mcck();
141571cde587SChristian Borntraeger 
141679395031SJens Freimann 	if (!kvm_is_ucontrol(vcpu->kvm)) {
141779395031SJens Freimann 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
141879395031SJens Freimann 		if (rc)
141979395031SJens Freimann 			return rc;
142079395031SJens Freimann 	}
14210ff31867SCarsten Otte 
14222c70fe44SChristian Borntraeger 	rc = kvm_s390_handle_requests(vcpu);
14232c70fe44SChristian Borntraeger 	if (rc)
14242c70fe44SChristian Borntraeger 		return rc;
14252c70fe44SChristian Borntraeger 
142627291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu)) {
142727291e21SDavid Hildenbrand 		kvm_s390_backup_guest_per_regs(vcpu);
142827291e21SDavid Hildenbrand 		kvm_s390_patch_guest_per_regs(vcpu);
142927291e21SDavid Hildenbrand 	}
143027291e21SDavid Hildenbrand 
1431b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icptcode = 0;
14323fb4c40fSThomas Huth 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
14333fb4c40fSThomas Huth 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
14343fb4c40fSThomas Huth 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
14352b29a9fdSDominik Dingel 
14363fb4c40fSThomas Huth 	return 0;
14373fb4c40fSThomas Huth }
14383fb4c40fSThomas Huth 
14393fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
14403fb4c40fSThomas Huth {
144124eb3a82SDominik Dingel 	int rc = -1;
14422b29a9fdSDominik Dingel 
14432b29a9fdSDominik Dingel 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
14442b29a9fdSDominik Dingel 		   vcpu->arch.sie_block->icptcode);
14452b29a9fdSDominik Dingel 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
14462b29a9fdSDominik Dingel 
144727291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu))
144827291e21SDavid Hildenbrand 		kvm_s390_restore_guest_per_regs(vcpu);
144927291e21SDavid Hildenbrand 
14503fb4c40fSThomas Huth 	if (exit_reason >= 0) {
14517c470539SMartin Schwidefsky 		rc = 0;
1452210b1607SThomas Huth 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
1453210b1607SThomas Huth 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
1454210b1607SThomas Huth 		vcpu->run->s390_ucontrol.trans_exc_code =
1455210b1607SThomas Huth 						current->thread.gmap_addr;
1456210b1607SThomas Huth 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
1457210b1607SThomas Huth 		rc = -EREMOTE;
145824eb3a82SDominik Dingel 
145924eb3a82SDominik Dingel 	} else if (current->thread.gmap_pfault) {
14603c038e6bSDominik Dingel 		trace_kvm_s390_major_guest_pfault(vcpu);
146124eb3a82SDominik Dingel 		current->thread.gmap_pfault = 0;
1462fa576c58SThomas Huth 		if (kvm_arch_setup_async_pf(vcpu)) {
146324eb3a82SDominik Dingel 			rc = 0;
1464fa576c58SThomas Huth 		} else {
1465fa576c58SThomas Huth 			gpa_t gpa = current->thread.gmap_addr;
1466fa576c58SThomas Huth 			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
1467fa576c58SThomas Huth 		}
146824eb3a82SDominik Dingel 	}
146924eb3a82SDominik Dingel 
147024eb3a82SDominik Dingel 	if (rc == -1) {
1471699bde3bSChristian Borntraeger 		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
1472699bde3bSChristian Borntraeger 		trace_kvm_s390_sie_fault(vcpu);
1473699bde3bSChristian Borntraeger 		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
14741f0d0f09SCarsten Otte 	}
1475b0c632dbSHeiko Carstens 
14765a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
14773fb4c40fSThomas Huth 
1478a76ccff6SThomas Huth 	if (rc == 0) {
1479a76ccff6SThomas Huth 		if (kvm_is_ucontrol(vcpu->kvm))
14802955c83fSChristian Borntraeger 			/* Don't exit for host interrupts. */
14812955c83fSChristian Borntraeger 			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
1482a76ccff6SThomas Huth 		else
1483a76ccff6SThomas Huth 			rc = kvm_handle_sie_intercept(vcpu);
1484a76ccff6SThomas Huth 	}
1485a76ccff6SThomas Huth 
14863fb4c40fSThomas Huth 	return rc;
14873fb4c40fSThomas Huth }
14883fb4c40fSThomas Huth 
14893fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu)
14903fb4c40fSThomas Huth {
14913fb4c40fSThomas Huth 	int rc, exit_reason;
14923fb4c40fSThomas Huth 
1493800c1065SThomas Huth 	/*
1494800c1065SThomas Huth 	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
1495800c1065SThomas Huth 	 * ning the guest), so that memslots (and other stuff) are protected
1496800c1065SThomas Huth 	 */
1497800c1065SThomas Huth 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1498800c1065SThomas Huth 
1499a76ccff6SThomas Huth 	do {
15003fb4c40fSThomas Huth 		rc = vcpu_pre_run(vcpu);
15013fb4c40fSThomas Huth 		if (rc)
1502a76ccff6SThomas Huth 			break;
15033fb4c40fSThomas Huth 
1504800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
15053fb4c40fSThomas Huth 		/*
1506a76ccff6SThomas Huth 		 * As PF_VCPU will be used in the fault handler, there must be
1507a76ccff6SThomas Huth 		 * no uaccess between guest_enter and guest_exit.
15083fb4c40fSThomas Huth 		 */
15093fb4c40fSThomas Huth 		preempt_disable();
15103fb4c40fSThomas Huth 		kvm_guest_enter();
15113fb4c40fSThomas Huth 		preempt_enable();
1512a76ccff6SThomas Huth 		exit_reason = sie64a(vcpu->arch.sie_block,
1513a76ccff6SThomas Huth 				     vcpu->run->s.regs.gprs);
15143fb4c40fSThomas Huth 		kvm_guest_exit();
1515800c1065SThomas Huth 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
15163fb4c40fSThomas Huth 
15173fb4c40fSThomas Huth 		rc = vcpu_post_run(vcpu, exit_reason);
151827291e21SDavid Hildenbrand 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
15193fb4c40fSThomas Huth 
1520800c1065SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1521e168bf8dSCarsten Otte 	return rc;
1522b0c632dbSHeiko Carstens }
1523b0c632dbSHeiko Carstens 
1524b028ee3eSDavid Hildenbrand static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1525b028ee3eSDavid Hildenbrand {
1526b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
1527b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
1528b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
1529b028ee3eSDavid Hildenbrand 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
1530b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
1531b028ee3eSDavid Hildenbrand 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
1532d3d692c8SDavid Hildenbrand 		/* some control register changes require a tlb flush */
1533d3d692c8SDavid Hildenbrand 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1534b028ee3eSDavid Hildenbrand 	}
1535b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
1536b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
1537b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
1538b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
1539b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
1540b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
1541b028ee3eSDavid Hildenbrand 	}
1542b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
1543b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
1544b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
1545b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
15469fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
15479fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
1548b028ee3eSDavid Hildenbrand 	}
1549b028ee3eSDavid Hildenbrand 	kvm_run->kvm_dirty_regs = 0;
1550b028ee3eSDavid Hildenbrand }
1551b028ee3eSDavid Hildenbrand 
1552b028ee3eSDavid Hildenbrand static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1553b028ee3eSDavid Hildenbrand {
1554b028ee3eSDavid Hildenbrand 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
1555b028ee3eSDavid Hildenbrand 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
1556b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
1557b028ee3eSDavid Hildenbrand 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
1558b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
1559b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
1560b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
1561b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
1562b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
1563b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
1564b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
1565b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
1566b028ee3eSDavid Hildenbrand }
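
/*
 * Illustrative userspace sketch for sync_regs()/store_regs() above, assuming
 * an open vcpu file descriptor (vcpu_fd) and a mmap'ed kvm_run structure
 * (run): a field changed in run->s.regs only takes effect if the matching
 * dirty bit is set before KVM_RUN; new_prefix is a caller-supplied value.
 *
 *	run->s.regs.prefix = new_prefix;
 *	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
 *	if (ioctl(vcpu_fd, KVM_RUN, 0))
 *		perror("KVM_RUN");
 */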
1567b028ee3eSDavid Hildenbrand 
1568b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1569b0c632dbSHeiko Carstens {
15708f2abe6aSChristian Borntraeger 	int rc;
1571b0c632dbSHeiko Carstens 	sigset_t sigsaved;
1572b0c632dbSHeiko Carstens 
157327291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu)) {
157427291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
157527291e21SDavid Hildenbrand 		return 0;
157627291e21SDavid Hildenbrand 	}
157727291e21SDavid Hildenbrand 
1578b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
1579b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1580b0c632dbSHeiko Carstens 
15816352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
15826852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
15836352e4d2SDavid Hildenbrand 	} else if (is_vcpu_stopped(vcpu)) {
15846352e4d2SDavid Hildenbrand 		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
15856352e4d2SDavid Hildenbrand 				   vcpu->vcpu_id);
15866352e4d2SDavid Hildenbrand 		return -EINVAL;
15876352e4d2SDavid Hildenbrand 	}
1588b0c632dbSHeiko Carstens 
1589b028ee3eSDavid Hildenbrand 	sync_regs(vcpu, kvm_run);
1590d7b0b5ebSCarsten Otte 
1591dab4079dSHeiko Carstens 	might_fault();
1592e168bf8dSCarsten Otte 	rc = __vcpu_run(vcpu);
15939ace903dSChristian Ehrhardt 
1594b1d16c49SChristian Ehrhardt 	if (signal_pending(current) && !rc) {
1595b1d16c49SChristian Ehrhardt 		kvm_run->exit_reason = KVM_EXIT_INTR;
15968f2abe6aSChristian Borntraeger 		rc = -EINTR;
1597b1d16c49SChristian Ehrhardt 	}
15988f2abe6aSChristian Borntraeger 
159927291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu) && !rc)  {
160027291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
160127291e21SDavid Hildenbrand 		rc = 0;
160227291e21SDavid Hildenbrand 	}
160327291e21SDavid Hildenbrand 
1604b8e660b8SHeiko Carstens 	if (rc == -EOPNOTSUPP) {
16058f2abe6aSChristian Borntraeger 		/* intercept cannot be handled in-kernel, prepare kvm-run */
16068f2abe6aSChristian Borntraeger 		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
16078f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
16088f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
16098f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
16108f2abe6aSChristian Borntraeger 		rc = 0;
16118f2abe6aSChristian Borntraeger 	}
16128f2abe6aSChristian Borntraeger 
16138f2abe6aSChristian Borntraeger 	if (rc == -EREMOTE) {
16148f2abe6aSChristian Borntraeger 		/* intercept was handled, but userspace support is needed;
16158f2abe6aSChristian Borntraeger 		 * kvm_run has been prepared by the handler */
16168f2abe6aSChristian Borntraeger 		rc = 0;
16178f2abe6aSChristian Borntraeger 	}
16188f2abe6aSChristian Borntraeger 
1619b028ee3eSDavid Hildenbrand 	store_regs(vcpu, kvm_run);
1620d7b0b5ebSCarsten Otte 
1621b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
1622b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1623b0c632dbSHeiko Carstens 
1624b0c632dbSHeiko Carstens 	vcpu->stat.exit_userspace++;
16257e8e6ab4SHeiko Carstens 	return rc;
1626b0c632dbSHeiko Carstens }
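
/*
 * Illustrative userspace sketch of the exit handling that pairs with
 * kvm_arch_vcpu_ioctl_run() above, assuming a mmap'ed kvm_run structure (run)
 * and a hypothetical helper handle_sieic(): intercepts that cannot be handled
 * in the kernel are forwarded to userspace as KVM_EXIT_S390_SIEIC.
 *
 *	switch (run->exit_reason) {
 *	case KVM_EXIT_S390_SIEIC:
 *		handle_sieic(run->s390_sieic.icptcode,
 *			     run->s390_sieic.ipa, run->s390_sieic.ipb);
 *		break;
 *	case KVM_EXIT_INTR:
 *		break;
 *	}
 */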
1627b0c632dbSHeiko Carstens 
1628b0c632dbSHeiko Carstens /*
1629b0c632dbSHeiko Carstens  * store status at address
1630b0c632dbSHeiko Carstens  * we have two special cases:
1631b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
1632b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
1633b0c632dbSHeiko Carstens  */
1634d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
1635b0c632dbSHeiko Carstens {
1636092670cdSCarsten Otte 	unsigned char archmode = 1;
1637fda902cbSMichael Mueller 	unsigned int px;
1638178bd789SThomas Huth 	u64 clkcomp;
1639d0bce605SHeiko Carstens 	int rc;
1640b0c632dbSHeiko Carstens 
1641d0bce605SHeiko Carstens 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
1642d0bce605SHeiko Carstens 		if (write_guest_abs(vcpu, 163, &archmode, 1))
1643b0c632dbSHeiko Carstens 			return -EFAULT;
1644d0bce605SHeiko Carstens 		gpa = SAVE_AREA_BASE;
1645d0bce605SHeiko Carstens 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
1646d0bce605SHeiko Carstens 		if (write_guest_real(vcpu, 163, &archmode, 1))
1647b0c632dbSHeiko Carstens 			return -EFAULT;
1648d0bce605SHeiko Carstens 		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
1649d0bce605SHeiko Carstens 	}
1650d0bce605SHeiko Carstens 	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
1651d0bce605SHeiko Carstens 			     vcpu->arch.guest_fpregs.fprs, 128);
1652d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
1653d0bce605SHeiko Carstens 			      vcpu->run->s.regs.gprs, 128);
1654d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
1655d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gpsw, 16);
1656fda902cbSMichael Mueller 	px = kvm_s390_get_prefix(vcpu);
1657d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
1658fda902cbSMichael Mueller 			      &px, 4);
1659d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu,
1660d0bce605SHeiko Carstens 			      gpa + offsetof(struct save_area, fp_ctrl_reg),
1661d0bce605SHeiko Carstens 			      &vcpu->arch.guest_fpregs.fpc, 4);
1662d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
1663d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->todpr, 4);
1664d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
1665d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->cputm, 8);
1666178bd789SThomas Huth 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
1667d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
1668d0bce605SHeiko Carstens 			      &clkcomp, 8);
1669d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
1670d0bce605SHeiko Carstens 			      &vcpu->run->s.regs.acrs, 64);
1671d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
1672d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gcr, 128);
1673d0bce605SHeiko Carstens 	return rc ? -EFAULT : 0;
1674b0c632dbSHeiko Carstens }
1675b0c632dbSHeiko Carstens 
1676e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
1677e879892cSThomas Huth {
1678e879892cSThomas Huth 	/*
1679e879892cSThomas Huth 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
1680e879892cSThomas Huth 	 * copying in vcpu load/put. Let's update our copies before we save
1681e879892cSThomas Huth 	 * them into the save area.
1682e879892cSThomas Huth 	 */
1683e879892cSThomas Huth 	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
1684e879892cSThomas Huth 	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
1685e879892cSThomas Huth 	save_access_regs(vcpu->run->s.regs.acrs);
1686e879892cSThomas Huth 
1687e879892cSThomas Huth 	return kvm_s390_store_status_unloaded(vcpu, addr);
1688e879892cSThomas Huth }
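
/*
 * Illustrative userspace sketch for the store status path above, assuming an
 * open vcpu file descriptor (vcpu_fd): the target address is passed directly
 * as the ioctl argument, with KVM_S390_STORE_STATUS_NOADDR and
 * KVM_S390_STORE_STATUS_PREFIXED selecting the two special cases.
 *
 *	if (ioctl(vcpu_fd, KVM_S390_STORE_STATUS, KVM_S390_STORE_STATUS_NOADDR))
 *		perror("KVM_S390_STORE_STATUS");
 */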
1689e879892cSThomas Huth 
16908ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
16918ad35755SDavid Hildenbrand {
16928ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
16938ad35755SDavid Hildenbrand 	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
16948ad35755SDavid Hildenbrand 	exit_sie_sync(vcpu);
16958ad35755SDavid Hildenbrand }
16968ad35755SDavid Hildenbrand 
16978ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
16988ad35755SDavid Hildenbrand {
16998ad35755SDavid Hildenbrand 	unsigned int i;
17008ad35755SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
17018ad35755SDavid Hildenbrand 
17028ad35755SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
17038ad35755SDavid Hildenbrand 		__disable_ibs_on_vcpu(vcpu);
17048ad35755SDavid Hildenbrand 	}
17058ad35755SDavid Hildenbrand }
17068ad35755SDavid Hildenbrand 
17078ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
17088ad35755SDavid Hildenbrand {
17098ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
17108ad35755SDavid Hildenbrand 	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
17118ad35755SDavid Hildenbrand 	exit_sie_sync(vcpu);
17128ad35755SDavid Hildenbrand }
17138ad35755SDavid Hildenbrand 
17146852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
17156852d7b6SDavid Hildenbrand {
17168ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
17178ad35755SDavid Hildenbrand 
17188ad35755SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
17198ad35755SDavid Hildenbrand 		return;
17208ad35755SDavid Hildenbrand 
17216852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
17228ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
1723433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
17248ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
17258ad35755SDavid Hildenbrand 
17268ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
17278ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
17288ad35755SDavid Hildenbrand 			started_vcpus++;
17298ad35755SDavid Hildenbrand 	}
17308ad35755SDavid Hildenbrand 
17318ad35755SDavid Hildenbrand 	if (started_vcpus == 0) {
17328ad35755SDavid Hildenbrand 		/* we're the only active VCPU -> speed it up */
17338ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(vcpu);
17348ad35755SDavid Hildenbrand 	} else if (started_vcpus == 1) {
17358ad35755SDavid Hildenbrand 		/*
17368ad35755SDavid Hildenbrand 		 * As we are starting a second VCPU, we have to disable
17378ad35755SDavid Hildenbrand 		 * the IBS facility on all VCPUs to remove potentially
17388ad35755SDavid Hildenbrand 		 * outstanding ENABLE requests.
17398ad35755SDavid Hildenbrand 		 */
17408ad35755SDavid Hildenbrand 		__disable_ibs_on_all_vcpus(vcpu->kvm);
17418ad35755SDavid Hildenbrand 	}
17428ad35755SDavid Hildenbrand 
17436852d7b6SDavid Hildenbrand 	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
17448ad35755SDavid Hildenbrand 	/*
17458ad35755SDavid Hildenbrand 	 * Another VCPU might have used IBS while we were offline.
17468ad35755SDavid Hildenbrand 	 * Let's play safe and flush the VCPU at startup.
17478ad35755SDavid Hildenbrand 	 */
1748d3d692c8SDavid Hildenbrand 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1749433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
17508ad35755SDavid Hildenbrand 	return;
17516852d7b6SDavid Hildenbrand }
17526852d7b6SDavid Hildenbrand 
17536852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
17546852d7b6SDavid Hildenbrand {
17558ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
17568ad35755SDavid Hildenbrand 	struct kvm_vcpu *started_vcpu = NULL;
17578ad35755SDavid Hildenbrand 
17588ad35755SDavid Hildenbrand 	if (is_vcpu_stopped(vcpu))
17598ad35755SDavid Hildenbrand 		return;
17608ad35755SDavid Hildenbrand 
17616852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
17628ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
1763433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
17648ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
17658ad35755SDavid Hildenbrand 
176632f5ff63SDavid Hildenbrand 	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
17676cddd432SDavid Hildenbrand 	kvm_s390_clear_stop_irq(vcpu);
176832f5ff63SDavid Hildenbrand 
17696cddd432SDavid Hildenbrand 	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
17708ad35755SDavid Hildenbrand 	__disable_ibs_on_vcpu(vcpu);
17718ad35755SDavid Hildenbrand 
17728ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
17738ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
17748ad35755SDavid Hildenbrand 			started_vcpus++;
17758ad35755SDavid Hildenbrand 			started_vcpu = vcpu->kvm->vcpus[i];
17768ad35755SDavid Hildenbrand 		}
17778ad35755SDavid Hildenbrand 	}
17788ad35755SDavid Hildenbrand 
17798ad35755SDavid Hildenbrand 	if (started_vcpus == 1) {
17808ad35755SDavid Hildenbrand 		/*
17818ad35755SDavid Hildenbrand 		 * As we only have one VCPU left, we want to enable the
17828ad35755SDavid Hildenbrand 		 * IBS facility for that VCPU to speed it up.
17838ad35755SDavid Hildenbrand 		 */
17848ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(started_vcpu);
17858ad35755SDavid Hildenbrand 	}
17868ad35755SDavid Hildenbrand 
1787433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
17888ad35755SDavid Hildenbrand 	return;
17896852d7b6SDavid Hildenbrand }
17906852d7b6SDavid Hildenbrand 
1791d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1792d6712df9SCornelia Huck 				     struct kvm_enable_cap *cap)
1793d6712df9SCornelia Huck {
1794d6712df9SCornelia Huck 	int r;
1795d6712df9SCornelia Huck 
1796d6712df9SCornelia Huck 	if (cap->flags)
1797d6712df9SCornelia Huck 		return -EINVAL;
1798d6712df9SCornelia Huck 
1799d6712df9SCornelia Huck 	switch (cap->cap) {
1800fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
1801fa6b7fe9SCornelia Huck 		if (!vcpu->kvm->arch.css_support) {
1802fa6b7fe9SCornelia Huck 			vcpu->kvm->arch.css_support = 1;
1803fa6b7fe9SCornelia Huck 			trace_kvm_s390_enable_css(vcpu->kvm);
1804fa6b7fe9SCornelia Huck 		}
1805fa6b7fe9SCornelia Huck 		r = 0;
1806fa6b7fe9SCornelia Huck 		break;
1807d6712df9SCornelia Huck 	default:
1808d6712df9SCornelia Huck 		r = -EINVAL;
1809d6712df9SCornelia Huck 		break;
1810d6712df9SCornelia Huck 	}
1811d6712df9SCornelia Huck 	return r;
1812d6712df9SCornelia Huck }
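
/*
 * Illustrative userspace sketch for kvm_vcpu_ioctl_enable_cap() above,
 * assuming an open vcpu file descriptor (vcpu_fd): KVM_CAP_S390_CSS_SUPPORT is
 * the only per-vcpu capability handled here, and cap->flags must be zero.
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_CSS_SUPPORT };
 *
 *	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap))
 *		perror("KVM_ENABLE_CAP");
 */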
1813d6712df9SCornelia Huck 
1814b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp,
1815b0c632dbSHeiko Carstens 			 unsigned int ioctl, unsigned long arg)
1816b0c632dbSHeiko Carstens {
1817b0c632dbSHeiko Carstens 	struct kvm_vcpu *vcpu = filp->private_data;
1818b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
1819800c1065SThomas Huth 	int idx;
1820bc923cc9SAvi Kivity 	long r;
1821b0c632dbSHeiko Carstens 
182293736624SAvi Kivity 	switch (ioctl) {
182393736624SAvi Kivity 	case KVM_S390_INTERRUPT: {
1824ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
1825383d0b05SJens Freimann 		struct kvm_s390_irq s390irq;
1826ba5c1e9bSCarsten Otte 
182793736624SAvi Kivity 		r = -EFAULT;
1828ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
182993736624SAvi Kivity 			break;
1830383d0b05SJens Freimann 		if (s390int_to_s390irq(&s390int, &s390irq))
1831383d0b05SJens Freimann 			return -EINVAL;
1832383d0b05SJens Freimann 		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
183393736624SAvi Kivity 		break;
1834ba5c1e9bSCarsten Otte 	}
1835b0c632dbSHeiko Carstens 	case KVM_S390_STORE_STATUS:
1836800c1065SThomas Huth 		idx = srcu_read_lock(&vcpu->kvm->srcu);
1837bc923cc9SAvi Kivity 		r = kvm_s390_vcpu_store_status(vcpu, arg);
1838800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
1839bc923cc9SAvi Kivity 		break;
1840b0c632dbSHeiko Carstens 	case KVM_S390_SET_INITIAL_PSW: {
1841b0c632dbSHeiko Carstens 		psw_t psw;
1842b0c632dbSHeiko Carstens 
1843bc923cc9SAvi Kivity 		r = -EFAULT;
1844b0c632dbSHeiko Carstens 		if (copy_from_user(&psw, argp, sizeof(psw)))
1845bc923cc9SAvi Kivity 			break;
1846bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
1847bc923cc9SAvi Kivity 		break;
1848b0c632dbSHeiko Carstens 	}
1849b0c632dbSHeiko Carstens 	case KVM_S390_INITIAL_RESET:
1850bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
1851bc923cc9SAvi Kivity 		break;
185214eebd91SCarsten Otte 	case KVM_SET_ONE_REG:
185314eebd91SCarsten Otte 	case KVM_GET_ONE_REG: {
185414eebd91SCarsten Otte 		struct kvm_one_reg reg;
185514eebd91SCarsten Otte 		r = -EFAULT;
185614eebd91SCarsten Otte 		if (copy_from_user(&reg, argp, sizeof(reg)))
185714eebd91SCarsten Otte 			break;
185814eebd91SCarsten Otte 		if (ioctl == KVM_SET_ONE_REG)
185914eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
186014eebd91SCarsten Otte 		else
186114eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
186214eebd91SCarsten Otte 		break;
186314eebd91SCarsten Otte 	}
186427e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
186527e0393fSCarsten Otte 	case KVM_S390_UCAS_MAP: {
186627e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
186727e0393fSCarsten Otte 
186827e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
186927e0393fSCarsten Otte 			r = -EFAULT;
187027e0393fSCarsten Otte 			break;
187127e0393fSCarsten Otte 		}
187227e0393fSCarsten Otte 
187327e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
187427e0393fSCarsten Otte 			r = -EINVAL;
187527e0393fSCarsten Otte 			break;
187627e0393fSCarsten Otte 		}
187727e0393fSCarsten Otte 
187827e0393fSCarsten Otte 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
187927e0393fSCarsten Otte 				     ucasmap.vcpu_addr, ucasmap.length);
188027e0393fSCarsten Otte 		break;
188127e0393fSCarsten Otte 	}
188227e0393fSCarsten Otte 	case KVM_S390_UCAS_UNMAP: {
188327e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
188427e0393fSCarsten Otte 
188527e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
188627e0393fSCarsten Otte 			r = -EFAULT;
188727e0393fSCarsten Otte 			break;
188827e0393fSCarsten Otte 		}
188927e0393fSCarsten Otte 
189027e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
189127e0393fSCarsten Otte 			r = -EINVAL;
189227e0393fSCarsten Otte 			break;
189327e0393fSCarsten Otte 		}
189427e0393fSCarsten Otte 
189527e0393fSCarsten Otte 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
189627e0393fSCarsten Otte 			ucasmap.length);
189727e0393fSCarsten Otte 		break;
189827e0393fSCarsten Otte 	}
189927e0393fSCarsten Otte #endif
1900ccc7910fSCarsten Otte 	case KVM_S390_VCPU_FAULT: {
1901527e30b4SMartin Schwidefsky 		r = gmap_fault(vcpu->arch.gmap, arg, 0);
1902ccc7910fSCarsten Otte 		break;
1903ccc7910fSCarsten Otte 	}
1904d6712df9SCornelia Huck 	case KVM_ENABLE_CAP:
1905d6712df9SCornelia Huck 	{
1906d6712df9SCornelia Huck 		struct kvm_enable_cap cap;
1907d6712df9SCornelia Huck 		r = -EFAULT;
1908d6712df9SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
1909d6712df9SCornelia Huck 			break;
1910d6712df9SCornelia Huck 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1911d6712df9SCornelia Huck 		break;
1912d6712df9SCornelia Huck 	}
1913b0c632dbSHeiko Carstens 	default:
19143e6afcf1SCarsten Otte 		r = -ENOTTY;
1915b0c632dbSHeiko Carstens 	}
1916bc923cc9SAvi Kivity 	return r;
1917b0c632dbSHeiko Carstens }
1918b0c632dbSHeiko Carstens 
19195b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
19205b1c1493SCarsten Otte {
19215b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
19225b1c1493SCarsten Otte 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
19235b1c1493SCarsten Otte 		 && (kvm_is_ucontrol(vcpu->kvm))) {
19245b1c1493SCarsten Otte 		vmf->page = virt_to_page(vcpu->arch.sie_block);
19255b1c1493SCarsten Otte 		get_page(vmf->page);
19265b1c1493SCarsten Otte 		return 0;
19275b1c1493SCarsten Otte 	}
19285b1c1493SCarsten Otte #endif
19295b1c1493SCarsten Otte 	return VM_FAULT_SIGBUS;
19305b1c1493SCarsten Otte }
19315b1c1493SCarsten Otte 
19325587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
19335587027cSAneesh Kumar K.V 			    unsigned long npages)
1934db3fe4ebSTakuya Yoshikawa {
1935db3fe4ebSTakuya Yoshikawa 	return 0;
1936db3fe4ebSTakuya Yoshikawa }
1937db3fe4ebSTakuya Yoshikawa 
1938b0c632dbSHeiko Carstens /* Section: memory related */
1939f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
1940f7784b8eSMarcelo Tosatti 				   struct kvm_memory_slot *memslot,
19417b6195a9STakuya Yoshikawa 				   struct kvm_userspace_memory_region *mem,
19427b6195a9STakuya Yoshikawa 				   enum kvm_mr_change change)
1943b0c632dbSHeiko Carstens {
1944dd2887e7SNick Wang 	/* A few sanity checks. Memory slots have to start and end on a segment
1945dd2887e7SNick Wang 	   boundary (1MB). The memory in userland may be fragmented into various
1946dd2887e7SNick Wang 	   different vmas. It is okay to mmap() and munmap() memory in this slot
1947dd2887e7SNick Wang 	   at any time after this call */
1948b0c632dbSHeiko Carstens 
1949598841caSCarsten Otte 	if (mem->userspace_addr & 0xffffful)
1950b0c632dbSHeiko Carstens 		return -EINVAL;
1951b0c632dbSHeiko Carstens 
1952598841caSCarsten Otte 	if (mem->memory_size & 0xffffful)
1953b0c632dbSHeiko Carstens 		return -EINVAL;
1954b0c632dbSHeiko Carstens 
1955f7784b8eSMarcelo Tosatti 	return 0;
1956f7784b8eSMarcelo Tosatti }
1957f7784b8eSMarcelo Tosatti 
1958f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
1959f7784b8eSMarcelo Tosatti 				struct kvm_userspace_memory_region *mem,
19608482644aSTakuya Yoshikawa 				const struct kvm_memory_slot *old,
19618482644aSTakuya Yoshikawa 				enum kvm_mr_change change)
1962f7784b8eSMarcelo Tosatti {
1963f7850c92SCarsten Otte 	int rc;
1964f7784b8eSMarcelo Tosatti 
19652cef4debSChristian Borntraeger 	/* If the basics of the memslot do not change, we do not want
19662cef4debSChristian Borntraeger 	 * to update the gmap. Every update causes several unnecessary
19672cef4debSChristian Borntraeger 	 * segment translation exceptions. This is usually handled just
19682cef4debSChristian Borntraeger 	 * fine by the normal fault handler + gmap, but it will also
19692cef4debSChristian Borntraeger 	 * cause faults on the prefix page of running guest CPUs.
19702cef4debSChristian Borntraeger 	 */
19712cef4debSChristian Borntraeger 	if (old->userspace_addr == mem->userspace_addr &&
19722cef4debSChristian Borntraeger 	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
19732cef4debSChristian Borntraeger 	    old->npages * PAGE_SIZE == mem->memory_size)
19742cef4debSChristian Borntraeger 		return;
1975598841caSCarsten Otte 
1976598841caSCarsten Otte 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
1977598841caSCarsten Otte 		mem->guest_phys_addr, mem->memory_size);
1978598841caSCarsten Otte 	if (rc)
1979f7850c92SCarsten Otte 		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
1980598841caSCarsten Otte 	return;
1981b0c632dbSHeiko Carstens }
1982b0c632dbSHeiko Carstens 
1983b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
1984b0c632dbSHeiko Carstens {
1985ef50f7acSChristian Borntraeger 	int ret;
19860ee75beaSAvi Kivity 	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1987ef50f7acSChristian Borntraeger 	if (ret)
1988ef50f7acSChristian Borntraeger 		return ret;
1989ef50f7acSChristian Borntraeger 
1990ef50f7acSChristian Borntraeger 	/*
1991ef50f7acSChristian Borntraeger 	 * guests can ask for up to 255+1 double words, so we need a full page
199225985edcSLucas De Marchi 	 * to hold the maximum number of facilities. On the other hand, we
1993ef50f7acSChristian Borntraeger 	 * only set facilities that are known to work in KVM.
1994ef50f7acSChristian Borntraeger 	 */
199578c4b59fSMichael Mueller 	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
199678c4b59fSMichael Mueller 	if (!vfacilities) {
1997ef50f7acSChristian Borntraeger 		kvm_exit();
1998ef50f7acSChristian Borntraeger 		return -ENOMEM;
1999ef50f7acSChristian Borntraeger 	}
200078c4b59fSMichael Mueller 	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
20017be81a46SChristian Borntraeger 	vfacilities[0] &= 0xff82fffbf47c2000UL;
20027feb6bb8SMichael Mueller 	vfacilities[1] &= 0x005c000000000000UL;
2003ef50f7acSChristian Borntraeger 	return 0;
2004b0c632dbSHeiko Carstens }
2005b0c632dbSHeiko Carstens 
2006b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void)
2007b0c632dbSHeiko Carstens {
200878c4b59fSMichael Mueller 	free_page((unsigned long) vfacilities);
2009b0c632dbSHeiko Carstens 	kvm_exit();
2010b0c632dbSHeiko Carstens }
2011b0c632dbSHeiko Carstens 
2012b0c632dbSHeiko Carstens module_init(kvm_s390_init);
2013b0c632dbSHeiko Carstens module_exit(kvm_s390_exit);
2014566af940SCornelia Huck 
2015566af940SCornelia Huck /*
2016566af940SCornelia Huck  * Enable autoloading of the kvm module.
2017566af940SCornelia Huck  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2018566af940SCornelia Huck  * since x86 takes a different approach.
2019566af940SCornelia Huck  */
2020566af940SCornelia Huck #include <linux/miscdevice.h>
2021566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR);
2022566af940SCornelia Huck MODULE_ALIAS("devname:kvm");
2023