xref: /openbmc/linux/arch/s390/kvm/kvm-s390.c (revision 41408c28f283b49202ae374b1c42bc8e9b33a048)
1b0c632dbSHeiko Carstens /*
2a53c8fabSHeiko Carstens  * hosting zSeries kernel virtual machines
3b0c632dbSHeiko Carstens  *
4628eb9b8SChristian Ehrhardt  * Copyright IBM Corp. 2008, 2009
5b0c632dbSHeiko Carstens  *
6b0c632dbSHeiko Carstens  * This program is free software; you can redistribute it and/or modify
7b0c632dbSHeiko Carstens  * it under the terms of the GNU General Public License (version 2 only)
8b0c632dbSHeiko Carstens  * as published by the Free Software Foundation.
9b0c632dbSHeiko Carstens  *
10b0c632dbSHeiko Carstens  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11b0c632dbSHeiko Carstens  *               Christian Borntraeger <borntraeger@de.ibm.com>
12b0c632dbSHeiko Carstens  *               Heiko Carstens <heiko.carstens@de.ibm.com>
13628eb9b8SChristian Ehrhardt  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
1415f36ebdSJason J. Herne  *               Jason J. Herne <jjherne@us.ibm.com>
15b0c632dbSHeiko Carstens  */
16b0c632dbSHeiko Carstens 
17b0c632dbSHeiko Carstens #include <linux/compiler.h>
18b0c632dbSHeiko Carstens #include <linux/err.h>
19b0c632dbSHeiko Carstens #include <linux/fs.h>
20ca872302SChristian Borntraeger #include <linux/hrtimer.h>
21b0c632dbSHeiko Carstens #include <linux/init.h>
22b0c632dbSHeiko Carstens #include <linux/kvm.h>
23b0c632dbSHeiko Carstens #include <linux/kvm_host.h>
24b0c632dbSHeiko Carstens #include <linux/module.h>
25a374e892STony Krowiak #include <linux/random.h>
26b0c632dbSHeiko Carstens #include <linux/slab.h>
27ba5c1e9bSCarsten Otte #include <linux/timer.h>
28*41408c28SThomas Huth #include <linux/vmalloc.h>
29cbb870c8SHeiko Carstens #include <asm/asm-offsets.h>
30b0c632dbSHeiko Carstens #include <asm/lowcore.h>
31b0c632dbSHeiko Carstens #include <asm/pgtable.h>
32f5daba1dSHeiko Carstens #include <asm/nmi.h>
33a0616cdeSDavid Howells #include <asm/switch_to.h>
341526bf9cSChristian Borntraeger #include <asm/sclp.h>
358f2abe6aSChristian Borntraeger #include "kvm-s390.h"
36b0c632dbSHeiko Carstens #include "gaccess.h"
37b0c632dbSHeiko Carstens 
385786fffaSCornelia Huck #define CREATE_TRACE_POINTS
395786fffaSCornelia Huck #include "trace.h"
40ade38c31SCornelia Huck #include "trace-s390.h"
415786fffaSCornelia Huck 
42*41408c28SThomas Huth #define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
43*41408c28SThomas Huth 
44b0c632dbSHeiko Carstens #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
45b0c632dbSHeiko Carstens 
46b0c632dbSHeiko Carstens struct kvm_stats_debugfs_item debugfs_entries[] = {
47b0c632dbSHeiko Carstens 	{ "userspace_handled", VCPU_STAT(exit_userspace) },
480eaeafa1SChristian Borntraeger 	{ "exit_null", VCPU_STAT(exit_null) },
498f2abe6aSChristian Borntraeger 	{ "exit_validity", VCPU_STAT(exit_validity) },
508f2abe6aSChristian Borntraeger 	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
518f2abe6aSChristian Borntraeger 	{ "exit_external_request", VCPU_STAT(exit_external_request) },
528f2abe6aSChristian Borntraeger 	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
53ba5c1e9bSCarsten Otte 	{ "exit_instruction", VCPU_STAT(exit_instruction) },
54ba5c1e9bSCarsten Otte 	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
55ba5c1e9bSCarsten Otte 	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
56f7819512SPaolo Bonzini 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
57ce2e4f0bSDavid Hildenbrand 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
58f5e10b09SChristian Borntraeger 	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
59ba5c1e9bSCarsten Otte 	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
60aba07508SDavid Hildenbrand 	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
61aba07508SDavid Hildenbrand 	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
62ba5c1e9bSCarsten Otte 	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
637697e71fSChristian Ehrhardt 	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
64ba5c1e9bSCarsten Otte 	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
65ba5c1e9bSCarsten Otte 	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
66ba5c1e9bSCarsten Otte 	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
67ba5c1e9bSCarsten Otte 	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
68ba5c1e9bSCarsten Otte 	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
69ba5c1e9bSCarsten Otte 	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
70ba5c1e9bSCarsten Otte 	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
7169d0d3a3SChristian Borntraeger 	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
72453423dcSChristian Borntraeger 	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
73453423dcSChristian Borntraeger 	{ "instruction_spx", VCPU_STAT(instruction_spx) },
74453423dcSChristian Borntraeger 	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
75453423dcSChristian Borntraeger 	{ "instruction_stap", VCPU_STAT(instruction_stap) },
76453423dcSChristian Borntraeger 	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
778a242234SHeiko Carstens 	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
78453423dcSChristian Borntraeger 	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
79453423dcSChristian Borntraeger 	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
80b31288faSKonstantin Weitz 	{ "instruction_essa", VCPU_STAT(instruction_essa) },
81453423dcSChristian Borntraeger 	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
82453423dcSChristian Borntraeger 	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
83bb25b9baSChristian Borntraeger 	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
845288fbf0SChristian Borntraeger 	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
85bd59d3a4SCornelia Huck 	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
867697e71fSChristian Ehrhardt 	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
875288fbf0SChristian Borntraeger 	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
8842cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
8942cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
905288fbf0SChristian Borntraeger 	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
9142cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
9242cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
93cd7b4b61SEric Farman 	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
945288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
955288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
965288fbf0SChristian Borntraeger 	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
9742cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
9842cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
9942cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
100388186bcSChristian Borntraeger 	{ "diagnose_10", VCPU_STAT(diagnose_10) },
101e28acfeaSChristian Borntraeger 	{ "diagnose_44", VCPU_STAT(diagnose_44) },
10241628d33SKonstantin Weitz 	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
103b0c632dbSHeiko Carstens 	{ NULL }
104b0c632dbSHeiko Carstens };
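
Each debugfs_entries item above is exported by the generic KVM code as a file under the kvm debugfs directory, normally /sys/kernel/debug/kvm/, holding the counter aggregated over all VMs. A minimal userspace sketch for reading one counter, assuming debugfs is mounted at that usual location (the helper name read_kvm_stat is purely illustrative):

#include <stdio.h>

/* Illustrative helper: read one aggregated KVM statistic from debugfs. */
static long read_kvm_stat(const char *name)
{
	char path[256];
	long value = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/debug/kvm/%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;	/* debugfs not mounted or stat missing */
	if (fscanf(f, "%ld", &value) != 1)
		value = -1;
	fclose(f);
	return value;
}

int main(void)
{
	/* "exit_null" matches the VCPU_STAT(exit_null) entry above. */
	printf("exit_null: %ld\n", read_kvm_stat("exit_null"));
	return 0;
}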
105b0c632dbSHeiko Carstens 
1069d8d5786SMichael Mueller /* upper facilities limit for kvm */
1079d8d5786SMichael Mueller unsigned long kvm_s390_fac_list_mask[] = {
1089d8d5786SMichael Mueller 	0xff82fffbf4fc2000UL,
1099d8d5786SMichael Mueller 	0x005c000000000000UL,
11013211ea7SEric Farman 	0x4000000000000000UL,
1119d8d5786SMichael Mueller };
112b0c632dbSHeiko Carstens 
1139d8d5786SMichael Mueller unsigned long kvm_s390_fac_list_mask_size(void)
11478c4b59fSMichael Mueller {
1159d8d5786SMichael Mueller 	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
1169d8d5786SMichael Mueller 	return ARRAY_SIZE(kvm_s390_fac_list_mask);
11778c4b59fSMichael Mueller }
11878c4b59fSMichael Mueller 
1199d8d5786SMichael Mueller static struct gmap_notifier gmap_notifier;
1209d8d5786SMichael Mueller 
121b0c632dbSHeiko Carstens /* Section: not file related */
12213a34e06SRadim Krčmář int kvm_arch_hardware_enable(void)
123b0c632dbSHeiko Carstens {
124b0c632dbSHeiko Carstens 	/* every s390 is virtualization enabled ;-) */
12510474ae8SAlexander Graf 	return 0;
126b0c632dbSHeiko Carstens }
127b0c632dbSHeiko Carstens 
1282c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
1292c70fe44SChristian Borntraeger 
130b0c632dbSHeiko Carstens int kvm_arch_hardware_setup(void)
131b0c632dbSHeiko Carstens {
1322c70fe44SChristian Borntraeger 	gmap_notifier.notifier_call = kvm_gmap_notifier;
1332c70fe44SChristian Borntraeger 	gmap_register_ipte_notifier(&gmap_notifier);
134b0c632dbSHeiko Carstens 	return 0;
135b0c632dbSHeiko Carstens }
136b0c632dbSHeiko Carstens 
137b0c632dbSHeiko Carstens void kvm_arch_hardware_unsetup(void)
138b0c632dbSHeiko Carstens {
1392c70fe44SChristian Borntraeger 	gmap_unregister_ipte_notifier(&gmap_notifier);
140b0c632dbSHeiko Carstens }
141b0c632dbSHeiko Carstens 
142b0c632dbSHeiko Carstens int kvm_arch_init(void *opaque)
143b0c632dbSHeiko Carstens {
14484877d93SCornelia Huck 	/* Register floating interrupt controller interface. */
14584877d93SCornelia Huck 	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
146b0c632dbSHeiko Carstens }
147b0c632dbSHeiko Carstens 
148b0c632dbSHeiko Carstens /* Section: device related */
149b0c632dbSHeiko Carstens long kvm_arch_dev_ioctl(struct file *filp,
150b0c632dbSHeiko Carstens 			unsigned int ioctl, unsigned long arg)
151b0c632dbSHeiko Carstens {
152b0c632dbSHeiko Carstens 	if (ioctl == KVM_S390_ENABLE_SIE)
153b0c632dbSHeiko Carstens 		return s390_enable_sie();
154b0c632dbSHeiko Carstens 	return -EINVAL;
155b0c632dbSHeiko Carstens }
156b0c632dbSHeiko Carstens 
157784aa3d7SAlexander Graf int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
158b0c632dbSHeiko Carstens {
159d7b0b5ebSCarsten Otte 	int r;
160d7b0b5ebSCarsten Otte 
1612bd0ac4eSCarsten Otte 	switch (ext) {
162d7b0b5ebSCarsten Otte 	case KVM_CAP_S390_PSW:
163b6cf8788SChristian Borntraeger 	case KVM_CAP_S390_GMAP:
16452e16b18SChristian Borntraeger 	case KVM_CAP_SYNC_MMU:
1651efd0f59SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
1661efd0f59SCarsten Otte 	case KVM_CAP_S390_UCONTROL:
1671efd0f59SCarsten Otte #endif
1683c038e6bSDominik Dingel 	case KVM_CAP_ASYNC_PF:
16960b413c9SChristian Borntraeger 	case KVM_CAP_SYNC_REGS:
17014eebd91SCarsten Otte 	case KVM_CAP_ONE_REG:
171d6712df9SCornelia Huck 	case KVM_CAP_ENABLE_CAP:
172fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
173ebc32262SCornelia Huck 	case KVM_CAP_IRQFD:
17410ccaa1eSCornelia Huck 	case KVM_CAP_IOEVENTFD:
175c05c4186SJens Freimann 	case KVM_CAP_DEVICE_CTRL:
176d938dc55SCornelia Huck 	case KVM_CAP_ENABLE_CAP_VM:
17778599d90SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
178f2061656SDominik Dingel 	case KVM_CAP_VM_ATTRIBUTES:
1796352e4d2SDavid Hildenbrand 	case KVM_CAP_MP_STATE:
1802444b352SDavid Hildenbrand 	case KVM_CAP_S390_USER_SIGP:
181d7b0b5ebSCarsten Otte 		r = 1;
182d7b0b5ebSCarsten Otte 		break;
183*41408c28SThomas Huth 	case KVM_CAP_S390_MEM_OP:
184*41408c28SThomas Huth 		r = MEM_OP_MAX_SIZE;
185*41408c28SThomas Huth 		break;
186e726b1bdSChristian Borntraeger 	case KVM_CAP_NR_VCPUS:
187e726b1bdSChristian Borntraeger 	case KVM_CAP_MAX_VCPUS:
188e726b1bdSChristian Borntraeger 		r = KVM_MAX_VCPUS;
189e726b1bdSChristian Borntraeger 		break;
190e1e2e605SNick Wang 	case KVM_CAP_NR_MEMSLOTS:
191e1e2e605SNick Wang 		r = KVM_USER_MEM_SLOTS;
192e1e2e605SNick Wang 		break;
1931526bf9cSChristian Borntraeger 	case KVM_CAP_S390_COW:
194abf09bedSMartin Schwidefsky 		r = MACHINE_HAS_ESOP;
1951526bf9cSChristian Borntraeger 		break;
19668c55750SEric Farman 	case KVM_CAP_S390_VECTOR_REGISTERS:
19768c55750SEric Farman 		r = MACHINE_HAS_VX;
19868c55750SEric Farman 		break;
1992bd0ac4eSCarsten Otte 	default:
200d7b0b5ebSCarsten Otte 		r = 0;
201b0c632dbSHeiko Carstens 	}
202d7b0b5ebSCarsten Otte 	return r;
2032bd0ac4eSCarsten Otte }
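
Userspace queries these capabilities with the KVM_CHECK_EXTENSION ioctl; most entries above simply report 1, while a few encode a value, e.g. KVM_CAP_S390_MEM_OP reports the maximum transfer size. A hedged sketch, assuming headers new enough to define the s390 capability constants (the kvm_fd/vm_fd names are illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int vm_fd, max_mem_op;

	if (kvm_fd < 0)
		return 1;
	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);	/* type 0: regular guest */
	if (vm_fd < 0)
		return 1;

	/* Boolean capability: 1 if user space SIGP handling is available. */
	printf("KVM_CAP_S390_USER_SIGP: %d\n",
	       ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_USER_SIGP));

	/* Valued capability: maximum size of a single KVM_S390_MEM_OP. */
	max_mem_op = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_MEM_OP);
	printf("KVM_S390_MEM_OP max size: %d bytes\n", max_mem_op);
	return 0;
}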
204b0c632dbSHeiko Carstens 
20515f36ebdSJason J. Herne static void kvm_s390_sync_dirty_log(struct kvm *kvm,
20615f36ebdSJason J. Herne 					struct kvm_memory_slot *memslot)
20715f36ebdSJason J. Herne {
20815f36ebdSJason J. Herne 	gfn_t cur_gfn, last_gfn;
20915f36ebdSJason J. Herne 	unsigned long address;
21015f36ebdSJason J. Herne 	struct gmap *gmap = kvm->arch.gmap;
21115f36ebdSJason J. Herne 
21215f36ebdSJason J. Herne 	down_read(&gmap->mm->mmap_sem);
21315f36ebdSJason J. Herne 	/* Loop over all guest pages */
21415f36ebdSJason J. Herne 	last_gfn = memslot->base_gfn + memslot->npages;
21515f36ebdSJason J. Herne 	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
21615f36ebdSJason J. Herne 		address = gfn_to_hva_memslot(memslot, cur_gfn);
21715f36ebdSJason J. Herne 
21815f36ebdSJason J. Herne 		if (gmap_test_and_clear_dirty(address, gmap))
21915f36ebdSJason J. Herne 			mark_page_dirty(kvm, cur_gfn);
22015f36ebdSJason J. Herne 	}
22115f36ebdSJason J. Herne 	up_read(&gmap->mm->mmap_sem);
22215f36ebdSJason J. Herne }
22315f36ebdSJason J. Herne 
224b0c632dbSHeiko Carstens /* Section: vm related */
225b0c632dbSHeiko Carstens /*
226b0c632dbSHeiko Carstens  * Get (and clear) the dirty memory log for a memory slot.
227b0c632dbSHeiko Carstens  */
228b0c632dbSHeiko Carstens int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
229b0c632dbSHeiko Carstens 			       struct kvm_dirty_log *log)
230b0c632dbSHeiko Carstens {
23115f36ebdSJason J. Herne 	int r;
23215f36ebdSJason J. Herne 	unsigned long n;
23315f36ebdSJason J. Herne 	struct kvm_memory_slot *memslot;
23415f36ebdSJason J. Herne 	int is_dirty = 0;
23515f36ebdSJason J. Herne 
23615f36ebdSJason J. Herne 	mutex_lock(&kvm->slots_lock);
23715f36ebdSJason J. Herne 
23815f36ebdSJason J. Herne 	r = -EINVAL;
23915f36ebdSJason J. Herne 	if (log->slot >= KVM_USER_MEM_SLOTS)
24015f36ebdSJason J. Herne 		goto out;
24115f36ebdSJason J. Herne 
24215f36ebdSJason J. Herne 	memslot = id_to_memslot(kvm->memslots, log->slot);
24315f36ebdSJason J. Herne 	r = -ENOENT;
24415f36ebdSJason J. Herne 	if (!memslot->dirty_bitmap)
24515f36ebdSJason J. Herne 		goto out;
24615f36ebdSJason J. Herne 
24715f36ebdSJason J. Herne 	kvm_s390_sync_dirty_log(kvm, memslot);
24815f36ebdSJason J. Herne 	r = kvm_get_dirty_log(kvm, log, &is_dirty);
24915f36ebdSJason J. Herne 	if (r)
25015f36ebdSJason J. Herne 		goto out;
25115f36ebdSJason J. Herne 
25215f36ebdSJason J. Herne 	/* Clear the dirty log */
25315f36ebdSJason J. Herne 	if (is_dirty) {
25415f36ebdSJason J. Herne 		n = kvm_dirty_bitmap_bytes(memslot);
25515f36ebdSJason J. Herne 		memset(memslot->dirty_bitmap, 0, n);
25615f36ebdSJason J. Herne 	}
25715f36ebdSJason J. Herne 	r = 0;
25815f36ebdSJason J. Herne out:
25915f36ebdSJason J. Herne 	mutex_unlock(&kvm->slots_lock);
26015f36ebdSJason J. Herne 	return r;
261b0c632dbSHeiko Carstens }
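
A migration loop drives the code above through the KVM_GET_DIRTY_LOG VM ioctl: it passes a bitmap with one bit per page of the slot, which the kernel fills from the gmap dirty state and then clears under slots_lock. A minimal sketch, assuming a previously registered slot described by the illustrative slot_id and slot_pages values:

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Fetch (and clear) the dirty bitmap of one memory slot.
 * vm_fd, slot_id and slot_pages are assumed to come from VM setup code.
 */
static unsigned long *get_dirty_bitmap(int vm_fd, unsigned int slot_id,
				       unsigned long slot_pages)
{
	struct kvm_dirty_log log;
	size_t bitmap_bytes = ((slot_pages + 63) / 64) * 8;
	unsigned long *bitmap = calloc(1, bitmap_bytes);

	if (!bitmap)
		return NULL;
	memset(&log, 0, sizeof(log));
	log.slot = slot_id;
	log.dirty_bitmap = bitmap;
	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
		free(bitmap);
		return NULL;	/* unknown slot or no dirty bitmap */
	}
	return bitmap;		/* bit N set => page N was written */
}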
262b0c632dbSHeiko Carstens 
263d938dc55SCornelia Huck static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
264d938dc55SCornelia Huck {
265d938dc55SCornelia Huck 	int r;
266d938dc55SCornelia Huck 
267d938dc55SCornelia Huck 	if (cap->flags)
268d938dc55SCornelia Huck 		return -EINVAL;
269d938dc55SCornelia Huck 
270d938dc55SCornelia Huck 	switch (cap->cap) {
27184223598SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
27284223598SCornelia Huck 		kvm->arch.use_irqchip = 1;
27384223598SCornelia Huck 		r = 0;
27484223598SCornelia Huck 		break;
2752444b352SDavid Hildenbrand 	case KVM_CAP_S390_USER_SIGP:
2762444b352SDavid Hildenbrand 		kvm->arch.user_sigp = 1;
2772444b352SDavid Hildenbrand 		r = 0;
2782444b352SDavid Hildenbrand 		break;
27968c55750SEric Farman 	case KVM_CAP_S390_VECTOR_REGISTERS:
28068c55750SEric Farman 		kvm->arch.use_vectors = MACHINE_HAS_VX;
28168c55750SEric Farman 		r = MACHINE_HAS_VX ? 0 : -EINVAL;
28268c55750SEric Farman 		break;
283d938dc55SCornelia Huck 	default:
284d938dc55SCornelia Huck 		r = -EINVAL;
285d938dc55SCornelia Huck 		break;
286d938dc55SCornelia Huck 	}
287d938dc55SCornelia Huck 	return r;
288d938dc55SCornelia Huck }
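
These VM-wide capabilities are switched on from userspace with the KVM_ENABLE_CAP ioctl on the VM file descriptor; flags (and the unused args) must be zero for the capabilities handled here. A sketch for KVM_CAP_S390_USER_SIGP (vm_fd is illustrative):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Ask KVM to route the SIGP orders that need it to userspace. */
static int enable_user_sigp(int vm_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));	/* flags/args must stay zero */
	cap.cap = KVM_CAP_S390_USER_SIGP;
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);	/* 0 on success */
}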
289d938dc55SCornelia Huck 
2908c0a7ce6SDominik Dingel static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
2918c0a7ce6SDominik Dingel {
2928c0a7ce6SDominik Dingel 	int ret;
2938c0a7ce6SDominik Dingel 
2948c0a7ce6SDominik Dingel 	switch (attr->attr) {
2958c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE:
2968c0a7ce6SDominik Dingel 		ret = 0;
2978c0a7ce6SDominik Dingel 		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
2988c0a7ce6SDominik Dingel 			ret = -EFAULT;
2998c0a7ce6SDominik Dingel 		break;
3008c0a7ce6SDominik Dingel 	default:
3018c0a7ce6SDominik Dingel 		ret = -ENXIO;
3028c0a7ce6SDominik Dingel 		break;
3038c0a7ce6SDominik Dingel 	}
3048c0a7ce6SDominik Dingel 	return ret;
3058c0a7ce6SDominik Dingel }
3068c0a7ce6SDominik Dingel 
3078c0a7ce6SDominik Dingel static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
3084f718eabSDominik Dingel {
3094f718eabSDominik Dingel 	int ret;
3104f718eabSDominik Dingel 	unsigned int idx;
3114f718eabSDominik Dingel 	switch (attr->attr) {
3124f718eabSDominik Dingel 	case KVM_S390_VM_MEM_ENABLE_CMMA:
3134f718eabSDominik Dingel 		ret = -EBUSY;
3144f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
3154f718eabSDominik Dingel 		if (atomic_read(&kvm->online_vcpus) == 0) {
3164f718eabSDominik Dingel 			kvm->arch.use_cmma = 1;
3174f718eabSDominik Dingel 			ret = 0;
3184f718eabSDominik Dingel 		}
3194f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
3204f718eabSDominik Dingel 		break;
3214f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CLR_CMMA:
3224f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
3234f718eabSDominik Dingel 		idx = srcu_read_lock(&kvm->srcu);
324a13cff31SDominik Dingel 		s390_reset_cmma(kvm->arch.gmap->mm);
3254f718eabSDominik Dingel 		srcu_read_unlock(&kvm->srcu, idx);
3264f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
3274f718eabSDominik Dingel 		ret = 0;
3284f718eabSDominik Dingel 		break;
3298c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE: {
3308c0a7ce6SDominik Dingel 		unsigned long new_limit;
3318c0a7ce6SDominik Dingel 
3328c0a7ce6SDominik Dingel 		if (kvm_is_ucontrol(kvm))
3338c0a7ce6SDominik Dingel 			return -EINVAL;
3348c0a7ce6SDominik Dingel 
3358c0a7ce6SDominik Dingel 		if (get_user(new_limit, (u64 __user *)attr->addr))
3368c0a7ce6SDominik Dingel 			return -EFAULT;
3378c0a7ce6SDominik Dingel 
3388c0a7ce6SDominik Dingel 		if (new_limit > kvm->arch.gmap->asce_end)
3398c0a7ce6SDominik Dingel 			return -E2BIG;
3408c0a7ce6SDominik Dingel 
3418c0a7ce6SDominik Dingel 		ret = -EBUSY;
3428c0a7ce6SDominik Dingel 		mutex_lock(&kvm->lock);
3438c0a7ce6SDominik Dingel 		if (atomic_read(&kvm->online_vcpus) == 0) {
3448c0a7ce6SDominik Dingel 			/* gmap_alloc will round the limit up */
3458c0a7ce6SDominik Dingel 			struct gmap *new = gmap_alloc(current->mm, new_limit);
3468c0a7ce6SDominik Dingel 
3478c0a7ce6SDominik Dingel 			if (!new) {
3488c0a7ce6SDominik Dingel 				ret = -ENOMEM;
3498c0a7ce6SDominik Dingel 			} else {
3508c0a7ce6SDominik Dingel 				gmap_free(kvm->arch.gmap);
3518c0a7ce6SDominik Dingel 				new->private = kvm;
3528c0a7ce6SDominik Dingel 				kvm->arch.gmap = new;
3538c0a7ce6SDominik Dingel 				ret = 0;
3548c0a7ce6SDominik Dingel 			}
3558c0a7ce6SDominik Dingel 		}
3568c0a7ce6SDominik Dingel 		mutex_unlock(&kvm->lock);
3578c0a7ce6SDominik Dingel 		break;
3588c0a7ce6SDominik Dingel 	}
3594f718eabSDominik Dingel 	default:
3604f718eabSDominik Dingel 		ret = -ENXIO;
3614f718eabSDominik Dingel 		break;
3624f718eabSDominik Dingel 	}
3634f718eabSDominik Dingel 	return ret;
3644f718eabSDominik Dingel }
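
Both memory controls are reached through the KVM_SET_DEVICE_ATTR VM ioctl with group KVM_S390_VM_MEM_CTRL, and, as the -EBUSY checks above show, they must be issued before the first VCPU exists. A sketch that enables CMMA and clamps the guest address space, with vm_fd and the 4 GB limit chosen purely for illustration:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative helper: issue one KVM_S390_VM_MEM_CTRL attribute. */
static int set_vm_mem_attr(int vm_fd, uint64_t attr, uint64_t *valp)
{
	struct kvm_device_attr da;

	memset(&da, 0, sizeof(da));
	da.group = KVM_S390_VM_MEM_CTRL;
	da.attr  = attr;
	da.addr  = (uint64_t)(unsigned long)valp;	/* may be 0 if unused */
	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &da);
}

static int setup_guest_memory(int vm_fd)
{
	uint64_t limit = 4ULL << 30;	/* illustrative 4 GB gmap limit */

	/* Both calls fail with -EBUSY once a VCPU has been created. */
	if (set_vm_mem_attr(vm_fd, KVM_S390_VM_MEM_ENABLE_CMMA, NULL))
		return -1;
	return set_vm_mem_attr(vm_fd, KVM_S390_VM_MEM_LIMIT_SIZE, &limit);
}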
3654f718eabSDominik Dingel 
366a374e892STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
367a374e892STony Krowiak 
368a374e892STony Krowiak static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
369a374e892STony Krowiak {
370a374e892STony Krowiak 	struct kvm_vcpu *vcpu;
371a374e892STony Krowiak 	int i;
372a374e892STony Krowiak 
3739d8d5786SMichael Mueller 	if (!test_kvm_facility(kvm, 76))
374a374e892STony Krowiak 		return -EINVAL;
375a374e892STony Krowiak 
376a374e892STony Krowiak 	mutex_lock(&kvm->lock);
377a374e892STony Krowiak 	switch (attr->attr) {
378a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
379a374e892STony Krowiak 		get_random_bytes(
380a374e892STony Krowiak 			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
381a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
382a374e892STony Krowiak 		kvm->arch.crypto.aes_kw = 1;
383a374e892STony Krowiak 		break;
384a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
385a374e892STony Krowiak 		get_random_bytes(
386a374e892STony Krowiak 			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
387a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
388a374e892STony Krowiak 		kvm->arch.crypto.dea_kw = 1;
389a374e892STony Krowiak 		break;
390a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
391a374e892STony Krowiak 		kvm->arch.crypto.aes_kw = 0;
392a374e892STony Krowiak 		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
393a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
394a374e892STony Krowiak 		break;
395a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
396a374e892STony Krowiak 		kvm->arch.crypto.dea_kw = 0;
397a374e892STony Krowiak 		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
398a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
399a374e892STony Krowiak 		break;
400a374e892STony Krowiak 	default:
401a374e892STony Krowiak 		mutex_unlock(&kvm->lock);
402a374e892STony Krowiak 		return -ENXIO;
403a374e892STony Krowiak 	}
404a374e892STony Krowiak 
405a374e892STony Krowiak 	kvm_for_each_vcpu(i, vcpu, kvm) {
406a374e892STony Krowiak 		kvm_s390_vcpu_crypto_setup(vcpu);
407a374e892STony Krowiak 		exit_sie(vcpu);
408a374e892STony Krowiak 	}
409a374e892STony Krowiak 	mutex_unlock(&kvm->lock);
410a374e892STony Krowiak 	return 0;
411a374e892STony Krowiak }
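
The wrapping-key controls are plain KVM_SET_DEVICE_ATTR calls with group KVM_S390_VM_CRYPTO and no payload; the kernel generates the key material itself and kicks every VCPU out of SIE so the updated CRYCB takes effect. A sketch (vm_fd is illustrative):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Roll a fresh AES wrapping key; only valid if facility 76 is available. */
static int refresh_aes_wrapping_key(int vm_fd)
{
	struct kvm_device_attr da;

	memset(&da, 0, sizeof(da));
	da.group = KVM_S390_VM_CRYPTO;
	da.attr  = KVM_S390_VM_CRYPTO_ENABLE_AES_KW;	/* addr stays 0 */
	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &da);
}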
412a374e892STony Krowiak 
41372f25020SJason J. Herne static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
41472f25020SJason J. Herne {
41572f25020SJason J. Herne 	u8 gtod_high;
41672f25020SJason J. Herne 
41772f25020SJason J. Herne 	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
41872f25020SJason J. Herne 					   sizeof(gtod_high)))
41972f25020SJason J. Herne 		return -EFAULT;
42072f25020SJason J. Herne 
42172f25020SJason J. Herne 	if (gtod_high != 0)
42272f25020SJason J. Herne 		return -EINVAL;
42372f25020SJason J. Herne 
42472f25020SJason J. Herne 	return 0;
42572f25020SJason J. Herne }
42672f25020SJason J. Herne 
42772f25020SJason J. Herne static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
42872f25020SJason J. Herne {
42972f25020SJason J. Herne 	struct kvm_vcpu *cur_vcpu;
43072f25020SJason J. Herne 	unsigned int vcpu_idx;
43172f25020SJason J. Herne 	u64 host_tod, gtod;
43272f25020SJason J. Herne 	int r;
43372f25020SJason J. Herne 
43472f25020SJason J. Herne 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
43572f25020SJason J. Herne 		return -EFAULT;
43672f25020SJason J. Herne 
43772f25020SJason J. Herne 	r = store_tod_clock(&host_tod);
43872f25020SJason J. Herne 	if (r)
43972f25020SJason J. Herne 		return r;
44072f25020SJason J. Herne 
44172f25020SJason J. Herne 	mutex_lock(&kvm->lock);
44272f25020SJason J. Herne 	kvm->arch.epoch = gtod - host_tod;
44372f25020SJason J. Herne 	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
44472f25020SJason J. Herne 		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
44572f25020SJason J. Herne 		exit_sie(cur_vcpu);
44672f25020SJason J. Herne 	}
44772f25020SJason J. Herne 	mutex_unlock(&kvm->lock);
44872f25020SJason J. Herne 	return 0;
44972f25020SJason J. Herne }
45072f25020SJason J. Herne 
45172f25020SJason J. Herne static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
45272f25020SJason J. Herne {
45372f25020SJason J. Herne 	int ret;
45472f25020SJason J. Herne 
45572f25020SJason J. Herne 	if (attr->flags)
45672f25020SJason J. Herne 		return -EINVAL;
45772f25020SJason J. Herne 
45872f25020SJason J. Herne 	switch (attr->attr) {
45972f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
46072f25020SJason J. Herne 		ret = kvm_s390_set_tod_high(kvm, attr);
46172f25020SJason J. Herne 		break;
46272f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
46372f25020SJason J. Herne 		ret = kvm_s390_set_tod_low(kvm, attr);
46472f25020SJason J. Herne 		break;
46572f25020SJason J. Herne 	default:
46672f25020SJason J. Herne 		ret = -ENXIO;
46772f25020SJason J. Herne 		break;
46872f25020SJason J. Herne 	}
46972f25020SJason J. Herne 	return ret;
47072f25020SJason J. Herne }
47172f25020SJason J. Herne 
47272f25020SJason J. Herne static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
47372f25020SJason J. Herne {
47472f25020SJason J. Herne 	u8 gtod_high = 0;
47572f25020SJason J. Herne 
47672f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod_high,
47772f25020SJason J. Herne 					 sizeof(gtod_high)))
47872f25020SJason J. Herne 		return -EFAULT;
47972f25020SJason J. Herne 
48072f25020SJason J. Herne 	return 0;
48172f25020SJason J. Herne }
48272f25020SJason J. Herne 
48372f25020SJason J. Herne static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
48472f25020SJason J. Herne {
48572f25020SJason J. Herne 	u64 host_tod, gtod;
48672f25020SJason J. Herne 	int r;
48772f25020SJason J. Herne 
48872f25020SJason J. Herne 	r = store_tod_clock(&host_tod);
48972f25020SJason J. Herne 	if (r)
49072f25020SJason J. Herne 		return r;
49172f25020SJason J. Herne 
49272f25020SJason J. Herne 	gtod = host_tod + kvm->arch.epoch;
49372f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
49472f25020SJason J. Herne 		return -EFAULT;
49572f25020SJason J. Herne 
49672f25020SJason J. Herne 	return 0;
49772f25020SJason J. Herne }
49872f25020SJason J. Herne 
49972f25020SJason J. Herne static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
50072f25020SJason J. Herne {
50172f25020SJason J. Herne 	int ret;
50272f25020SJason J. Herne 
50372f25020SJason J. Herne 	if (attr->flags)
50472f25020SJason J. Herne 		return -EINVAL;
50572f25020SJason J. Herne 
50672f25020SJason J. Herne 	switch (attr->attr) {
50772f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
50872f25020SJason J. Herne 		ret = kvm_s390_get_tod_high(kvm, attr);
50972f25020SJason J. Herne 		break;
51072f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
51172f25020SJason J. Herne 		ret = kvm_s390_get_tod_low(kvm, attr);
51272f25020SJason J. Herne 		break;
51372f25020SJason J. Herne 	default:
51472f25020SJason J. Herne 		ret = -ENXIO;
51572f25020SJason J. Herne 		break;
51672f25020SJason J. Herne 	}
51772f25020SJason J. Herne 	return ret;
51872f25020SJason J. Herne }
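
The guest TOD clock is thus exchanged as two device attributes: KVM_S390_VM_TOD_HIGH (currently required to be zero) and KVM_S390_VM_TOD_LOW, each copied to or from the user address in the attribute. A hedged get/set sketch for the low part (vm_fd is illustrative):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int guest_tod(int vm_fd, int set, uint64_t *tod_low)
{
	struct kvm_device_attr da;

	memset(&da, 0, sizeof(da));
	da.group = KVM_S390_VM_TOD;
	da.attr  = KVM_S390_VM_TOD_LOW;
	da.addr  = (uint64_t)(unsigned long)tod_low;
	return ioctl(vm_fd, set ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR, &da);
}

/* Example: read the current guest TOD and push it back unchanged. */
static int touch_guest_tod(int vm_fd)
{
	uint64_t tod;

	if (guest_tod(vm_fd, 0, &tod))
		return -1;
	return guest_tod(vm_fd, 1, &tod);
}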
51972f25020SJason J. Herne 
520658b6edaSMichael Mueller static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
521658b6edaSMichael Mueller {
522658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_processor *proc;
523658b6edaSMichael Mueller 	int ret = 0;
524658b6edaSMichael Mueller 
525658b6edaSMichael Mueller 	mutex_lock(&kvm->lock);
526658b6edaSMichael Mueller 	if (atomic_read(&kvm->online_vcpus)) {
527658b6edaSMichael Mueller 		ret = -EBUSY;
528658b6edaSMichael Mueller 		goto out;
529658b6edaSMichael Mueller 	}
530658b6edaSMichael Mueller 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
531658b6edaSMichael Mueller 	if (!proc) {
532658b6edaSMichael Mueller 		ret = -ENOMEM;
533658b6edaSMichael Mueller 		goto out;
534658b6edaSMichael Mueller 	}
535658b6edaSMichael Mueller 	if (!copy_from_user(proc, (void __user *)attr->addr,
536658b6edaSMichael Mueller 			    sizeof(*proc))) {
537658b6edaSMichael Mueller 		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
538658b6edaSMichael Mueller 		       sizeof(struct cpuid));
539658b6edaSMichael Mueller 		kvm->arch.model.ibc = proc->ibc;
540981467c9SMichael Mueller 		memcpy(kvm->arch.model.fac->list, proc->fac_list,
541658b6edaSMichael Mueller 		       S390_ARCH_FAC_LIST_SIZE_BYTE);
542658b6edaSMichael Mueller 	} else
543658b6edaSMichael Mueller 		ret = -EFAULT;
544658b6edaSMichael Mueller 	kfree(proc);
545658b6edaSMichael Mueller out:
546658b6edaSMichael Mueller 	mutex_unlock(&kvm->lock);
547658b6edaSMichael Mueller 	return ret;
548658b6edaSMichael Mueller }
549658b6edaSMichael Mueller 
550658b6edaSMichael Mueller static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
551658b6edaSMichael Mueller {
552658b6edaSMichael Mueller 	int ret = -ENXIO;
553658b6edaSMichael Mueller 
554658b6edaSMichael Mueller 	switch (attr->attr) {
555658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
556658b6edaSMichael Mueller 		ret = kvm_s390_set_processor(kvm, attr);
557658b6edaSMichael Mueller 		break;
558658b6edaSMichael Mueller 	}
559658b6edaSMichael Mueller 	return ret;
560658b6edaSMichael Mueller }
561658b6edaSMichael Mueller 
562658b6edaSMichael Mueller static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
563658b6edaSMichael Mueller {
564658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_processor *proc;
565658b6edaSMichael Mueller 	int ret = 0;
566658b6edaSMichael Mueller 
567658b6edaSMichael Mueller 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
568658b6edaSMichael Mueller 	if (!proc) {
569658b6edaSMichael Mueller 		ret = -ENOMEM;
570658b6edaSMichael Mueller 		goto out;
571658b6edaSMichael Mueller 	}
572658b6edaSMichael Mueller 	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
573658b6edaSMichael Mueller 	proc->ibc = kvm->arch.model.ibc;
574981467c9SMichael Mueller 	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
575658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
576658b6edaSMichael Mueller 		ret = -EFAULT;
577658b6edaSMichael Mueller 	kfree(proc);
578658b6edaSMichael Mueller out:
579658b6edaSMichael Mueller 	return ret;
580658b6edaSMichael Mueller }
581658b6edaSMichael Mueller 
582658b6edaSMichael Mueller static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
583658b6edaSMichael Mueller {
584658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_machine *mach;
585658b6edaSMichael Mueller 	int ret = 0;
586658b6edaSMichael Mueller 
587658b6edaSMichael Mueller 	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
588658b6edaSMichael Mueller 	if (!mach) {
589658b6edaSMichael Mueller 		ret = -ENOMEM;
590658b6edaSMichael Mueller 		goto out;
591658b6edaSMichael Mueller 	}
592658b6edaSMichael Mueller 	get_cpu_id((struct cpuid *) &mach->cpuid);
593658b6edaSMichael Mueller 	mach->ibc = sclp_get_ibc();
594981467c9SMichael Mueller 	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
595981467c9SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
596658b6edaSMichael Mueller 	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
59794422ee8SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
598658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
599658b6edaSMichael Mueller 		ret = -EFAULT;
600658b6edaSMichael Mueller 	kfree(mach);
601658b6edaSMichael Mueller out:
602658b6edaSMichael Mueller 	return ret;
603658b6edaSMichael Mueller }
604658b6edaSMichael Mueller 
605658b6edaSMichael Mueller static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
606658b6edaSMichael Mueller {
607658b6edaSMichael Mueller 	int ret = -ENXIO;
608658b6edaSMichael Mueller 
609658b6edaSMichael Mueller 	switch (attr->attr) {
610658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
611658b6edaSMichael Mueller 		ret = kvm_s390_get_processor(kvm, attr);
612658b6edaSMichael Mueller 		break;
613658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MACHINE:
614658b6edaSMichael Mueller 		ret = kvm_s390_get_machine(kvm, attr);
615658b6edaSMichael Mueller 		break;
616658b6edaSMichael Mueller 	}
617658b6edaSMichael Mueller 	return ret;
618658b6edaSMichael Mueller }
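
Userspace would typically read the machine description first (what the host can provide) and later write a possibly reduced processor description back before creating VCPUs. A sketch of the read side, assuming the struct kvm_s390_vm_cpu_machine layout from the s390 uapi headers (vm_fd is illustrative):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int print_host_cpu_model(int vm_fd)
{
	struct kvm_s390_vm_cpu_machine mach;
	struct kvm_device_attr da;

	memset(&mach, 0, sizeof(mach));
	memset(&da, 0, sizeof(da));
	da.group = KVM_S390_VM_CPU_MODEL;
	da.attr  = KVM_S390_VM_CPU_MACHINE;
	da.addr  = (__u64)(unsigned long)&mach;
	if (ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &da))
		return -1;

	printf("cpuid: 0x%016llx ibc: 0x%x\n",
	       (unsigned long long)mach.cpuid, mach.ibc);
	/* mach.fac_mask / mach.fac_list describe the usable host facilities. */
	return 0;
}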
619658b6edaSMichael Mueller 
620f2061656SDominik Dingel static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
621f2061656SDominik Dingel {
622f2061656SDominik Dingel 	int ret;
623f2061656SDominik Dingel 
624f2061656SDominik Dingel 	switch (attr->group) {
6254f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
6268c0a7ce6SDominik Dingel 		ret = kvm_s390_set_mem_control(kvm, attr);
6274f718eabSDominik Dingel 		break;
62872f25020SJason J. Herne 	case KVM_S390_VM_TOD:
62972f25020SJason J. Herne 		ret = kvm_s390_set_tod(kvm, attr);
63072f25020SJason J. Herne 		break;
631658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
632658b6edaSMichael Mueller 		ret = kvm_s390_set_cpu_model(kvm, attr);
633658b6edaSMichael Mueller 		break;
634a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO:
635a374e892STony Krowiak 		ret = kvm_s390_vm_set_crypto(kvm, attr);
636a374e892STony Krowiak 		break;
637f2061656SDominik Dingel 	default:
638f2061656SDominik Dingel 		ret = -ENXIO;
639f2061656SDominik Dingel 		break;
640f2061656SDominik Dingel 	}
641f2061656SDominik Dingel 
642f2061656SDominik Dingel 	return ret;
643f2061656SDominik Dingel }
644f2061656SDominik Dingel 
645f2061656SDominik Dingel static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
646f2061656SDominik Dingel {
6478c0a7ce6SDominik Dingel 	int ret;
6488c0a7ce6SDominik Dingel 
6498c0a7ce6SDominik Dingel 	switch (attr->group) {
6508c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
6518c0a7ce6SDominik Dingel 		ret = kvm_s390_get_mem_control(kvm, attr);
6528c0a7ce6SDominik Dingel 		break;
65372f25020SJason J. Herne 	case KVM_S390_VM_TOD:
65472f25020SJason J. Herne 		ret = kvm_s390_get_tod(kvm, attr);
65572f25020SJason J. Herne 		break;
656658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
657658b6edaSMichael Mueller 		ret = kvm_s390_get_cpu_model(kvm, attr);
658658b6edaSMichael Mueller 		break;
6598c0a7ce6SDominik Dingel 	default:
6608c0a7ce6SDominik Dingel 		ret = -ENXIO;
6618c0a7ce6SDominik Dingel 		break;
6628c0a7ce6SDominik Dingel 	}
6638c0a7ce6SDominik Dingel 
6648c0a7ce6SDominik Dingel 	return ret;
665f2061656SDominik Dingel }
666f2061656SDominik Dingel 
667f2061656SDominik Dingel static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
668f2061656SDominik Dingel {
669f2061656SDominik Dingel 	int ret;
670f2061656SDominik Dingel 
671f2061656SDominik Dingel 	switch (attr->group) {
6724f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
6734f718eabSDominik Dingel 		switch (attr->attr) {
6744f718eabSDominik Dingel 		case KVM_S390_VM_MEM_ENABLE_CMMA:
6754f718eabSDominik Dingel 		case KVM_S390_VM_MEM_CLR_CMMA:
6768c0a7ce6SDominik Dingel 		case KVM_S390_VM_MEM_LIMIT_SIZE:
6774f718eabSDominik Dingel 			ret = 0;
6784f718eabSDominik Dingel 			break;
6794f718eabSDominik Dingel 		default:
6804f718eabSDominik Dingel 			ret = -ENXIO;
6814f718eabSDominik Dingel 			break;
6824f718eabSDominik Dingel 		}
6834f718eabSDominik Dingel 		break;
68472f25020SJason J. Herne 	case KVM_S390_VM_TOD:
68572f25020SJason J. Herne 		switch (attr->attr) {
68672f25020SJason J. Herne 		case KVM_S390_VM_TOD_LOW:
68772f25020SJason J. Herne 		case KVM_S390_VM_TOD_HIGH:
68872f25020SJason J. Herne 			ret = 0;
68972f25020SJason J. Herne 			break;
69072f25020SJason J. Herne 		default:
69172f25020SJason J. Herne 			ret = -ENXIO;
69272f25020SJason J. Herne 			break;
69372f25020SJason J. Herne 		}
69472f25020SJason J. Herne 		break;
695658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
696658b6edaSMichael Mueller 		switch (attr->attr) {
697658b6edaSMichael Mueller 		case KVM_S390_VM_CPU_PROCESSOR:
698658b6edaSMichael Mueller 		case KVM_S390_VM_CPU_MACHINE:
699658b6edaSMichael Mueller 			ret = 0;
700658b6edaSMichael Mueller 			break;
701658b6edaSMichael Mueller 		default:
702658b6edaSMichael Mueller 			ret = -ENXIO;
703658b6edaSMichael Mueller 			break;
704658b6edaSMichael Mueller 		}
705658b6edaSMichael Mueller 		break;
706a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO:
707a374e892STony Krowiak 		switch (attr->attr) {
708a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
709a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
710a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
711a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
712a374e892STony Krowiak 			ret = 0;
713a374e892STony Krowiak 			break;
714a374e892STony Krowiak 		default:
715a374e892STony Krowiak 			ret = -ENXIO;
716a374e892STony Krowiak 			break;
717a374e892STony Krowiak 		}
718a374e892STony Krowiak 		break;
719f2061656SDominik Dingel 	default:
720f2061656SDominik Dingel 		ret = -ENXIO;
721f2061656SDominik Dingel 		break;
722f2061656SDominik Dingel 	}
723f2061656SDominik Dingel 
724f2061656SDominik Dingel 	return ret;
725f2061656SDominik Dingel }
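
Before using any of the groups above, userspace can probe for them with KVM_HAS_DEVICE_ATTR on the VM file descriptor; only group and attr are inspected, so no payload buffer is needed. A sketch (vm_fd is illustrative):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Returns 1 if the given VM attribute exists, 0 otherwise. */
static int vm_attr_supported(int vm_fd, __u32 group, __u64 attr)
{
	struct kvm_device_attr da;

	memset(&da, 0, sizeof(da));
	da.group = group;
	da.attr  = attr;
	return ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &da) == 0;
}

/* e.g. vm_attr_supported(vm_fd, KVM_S390_VM_MEM_CTRL, KVM_S390_VM_MEM_ENABLE_CMMA) */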
726f2061656SDominik Dingel 
727b0c632dbSHeiko Carstens long kvm_arch_vm_ioctl(struct file *filp,
728b0c632dbSHeiko Carstens 		       unsigned int ioctl, unsigned long arg)
729b0c632dbSHeiko Carstens {
730b0c632dbSHeiko Carstens 	struct kvm *kvm = filp->private_data;
731b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
732f2061656SDominik Dingel 	struct kvm_device_attr attr;
733b0c632dbSHeiko Carstens 	int r;
734b0c632dbSHeiko Carstens 
735b0c632dbSHeiko Carstens 	switch (ioctl) {
736ba5c1e9bSCarsten Otte 	case KVM_S390_INTERRUPT: {
737ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
738ba5c1e9bSCarsten Otte 
739ba5c1e9bSCarsten Otte 		r = -EFAULT;
740ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
741ba5c1e9bSCarsten Otte 			break;
742ba5c1e9bSCarsten Otte 		r = kvm_s390_inject_vm(kvm, &s390int);
743ba5c1e9bSCarsten Otte 		break;
744ba5c1e9bSCarsten Otte 	}
745d938dc55SCornelia Huck 	case KVM_ENABLE_CAP: {
746d938dc55SCornelia Huck 		struct kvm_enable_cap cap;
747d938dc55SCornelia Huck 		r = -EFAULT;
748d938dc55SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
749d938dc55SCornelia Huck 			break;
750d938dc55SCornelia Huck 		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
751d938dc55SCornelia Huck 		break;
752d938dc55SCornelia Huck 	}
75384223598SCornelia Huck 	case KVM_CREATE_IRQCHIP: {
75484223598SCornelia Huck 		struct kvm_irq_routing_entry routing;
75584223598SCornelia Huck 
75684223598SCornelia Huck 		r = -EINVAL;
75784223598SCornelia Huck 		if (kvm->arch.use_irqchip) {
75884223598SCornelia Huck 			/* Set up dummy routing. */
75984223598SCornelia Huck 			memset(&routing, 0, sizeof(routing));
76084223598SCornelia Huck 			kvm_set_irq_routing(kvm, &routing, 0, 0);
76184223598SCornelia Huck 			r = 0;
76284223598SCornelia Huck 		}
76384223598SCornelia Huck 		break;
76484223598SCornelia Huck 	}
765f2061656SDominik Dingel 	case KVM_SET_DEVICE_ATTR: {
766f2061656SDominik Dingel 		r = -EFAULT;
767f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
768f2061656SDominik Dingel 			break;
769f2061656SDominik Dingel 		r = kvm_s390_vm_set_attr(kvm, &attr);
770f2061656SDominik Dingel 		break;
771f2061656SDominik Dingel 	}
772f2061656SDominik Dingel 	case KVM_GET_DEVICE_ATTR: {
773f2061656SDominik Dingel 		r = -EFAULT;
774f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
775f2061656SDominik Dingel 			break;
776f2061656SDominik Dingel 		r = kvm_s390_vm_get_attr(kvm, &attr);
777f2061656SDominik Dingel 		break;
778f2061656SDominik Dingel 	}
779f2061656SDominik Dingel 	case KVM_HAS_DEVICE_ATTR: {
780f2061656SDominik Dingel 		r = -EFAULT;
781f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
782f2061656SDominik Dingel 			break;
783f2061656SDominik Dingel 		r = kvm_s390_vm_has_attr(kvm, &attr);
784f2061656SDominik Dingel 		break;
785f2061656SDominik Dingel 	}
786b0c632dbSHeiko Carstens 	default:
787367e1319SAvi Kivity 		r = -ENOTTY;
788b0c632dbSHeiko Carstens 	}
789b0c632dbSHeiko Carstens 
790b0c632dbSHeiko Carstens 	return r;
791b0c632dbSHeiko Carstens }
792b0c632dbSHeiko Carstens 
79345c9b47cSTony Krowiak static int kvm_s390_query_ap_config(u8 *config)
79445c9b47cSTony Krowiak {
79545c9b47cSTony Krowiak 	u32 fcn_code = 0x04000000UL;
79686044c8cSChristian Borntraeger 	u32 cc = 0;
79745c9b47cSTony Krowiak 
79886044c8cSChristian Borntraeger 	memset(config, 0, 128);
79945c9b47cSTony Krowiak 	asm volatile(
80045c9b47cSTony Krowiak 		"lgr 0,%1\n"
80145c9b47cSTony Krowiak 		"lgr 2,%2\n"
80245c9b47cSTony Krowiak 		".long 0xb2af0000\n"		/* PQAP(QCI) */
80386044c8cSChristian Borntraeger 		"0: ipm %0\n"
80445c9b47cSTony Krowiak 		"srl %0,28\n"
80586044c8cSChristian Borntraeger 		"1:\n"
80686044c8cSChristian Borntraeger 		EX_TABLE(0b, 1b)
80786044c8cSChristian Borntraeger 		: "+r" (cc)
80845c9b47cSTony Krowiak 		: "r" (fcn_code), "r" (config)
80945c9b47cSTony Krowiak 		: "cc", "0", "2", "memory"
81045c9b47cSTony Krowiak 	);
81145c9b47cSTony Krowiak 
81245c9b47cSTony Krowiak 	return cc;
81345c9b47cSTony Krowiak }
81445c9b47cSTony Krowiak 
81545c9b47cSTony Krowiak static int kvm_s390_apxa_installed(void)
81645c9b47cSTony Krowiak {
81745c9b47cSTony Krowiak 	u8 config[128];
81845c9b47cSTony Krowiak 	int cc;
81945c9b47cSTony Krowiak 
82045c9b47cSTony Krowiak 	if (test_facility(2) && test_facility(12)) {
82145c9b47cSTony Krowiak 		cc = kvm_s390_query_ap_config(config);
82245c9b47cSTony Krowiak 
82345c9b47cSTony Krowiak 		if (cc)
82445c9b47cSTony Krowiak 			pr_err("PQAP(QCI) failed with cc=%d", cc);
82545c9b47cSTony Krowiak 		else
82645c9b47cSTony Krowiak 			return config[0] & 0x40;
82745c9b47cSTony Krowiak 	}
82845c9b47cSTony Krowiak 
82945c9b47cSTony Krowiak 	return 0;
83045c9b47cSTony Krowiak }
83145c9b47cSTony Krowiak 
83245c9b47cSTony Krowiak static void kvm_s390_set_crycb_format(struct kvm *kvm)
83345c9b47cSTony Krowiak {
83445c9b47cSTony Krowiak 	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
83545c9b47cSTony Krowiak 
83645c9b47cSTony Krowiak 	if (kvm_s390_apxa_installed())
83745c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
83845c9b47cSTony Krowiak 	else
83945c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
84045c9b47cSTony Krowiak }
84145c9b47cSTony Krowiak 
8429d8d5786SMichael Mueller static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
8439d8d5786SMichael Mueller {
8449d8d5786SMichael Mueller 	get_cpu_id(cpu_id);
8459d8d5786SMichael Mueller 	cpu_id->version = 0xff;
8469d8d5786SMichael Mueller }
8479d8d5786SMichael Mueller 
8485102ee87STony Krowiak static int kvm_s390_crypto_init(struct kvm *kvm)
8495102ee87STony Krowiak {
8509d8d5786SMichael Mueller 	if (!test_kvm_facility(kvm, 76))
8515102ee87STony Krowiak 		return 0;
8525102ee87STony Krowiak 
8535102ee87STony Krowiak 	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
8545102ee87STony Krowiak 					 GFP_KERNEL | GFP_DMA);
8555102ee87STony Krowiak 	if (!kvm->arch.crypto.crycb)
8565102ee87STony Krowiak 		return -ENOMEM;
8575102ee87STony Krowiak 
85845c9b47cSTony Krowiak 	kvm_s390_set_crycb_format(kvm);
8595102ee87STony Krowiak 
860ed6f76b4STony Krowiak 	/* Enable AES/DEA protected key functions by default */
861ed6f76b4STony Krowiak 	kvm->arch.crypto.aes_kw = 1;
862ed6f76b4STony Krowiak 	kvm->arch.crypto.dea_kw = 1;
863ed6f76b4STony Krowiak 	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
864ed6f76b4STony Krowiak 			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
865ed6f76b4STony Krowiak 	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
866ed6f76b4STony Krowiak 			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
867a374e892STony Krowiak 
8685102ee87STony Krowiak 	return 0;
8695102ee87STony Krowiak }
8705102ee87STony Krowiak 
871e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
872b0c632dbSHeiko Carstens {
8739d8d5786SMichael Mueller 	int i, rc;
874b0c632dbSHeiko Carstens 	char debug_name[16];
875f6c137ffSChristian Borntraeger 	static unsigned long sca_offset;
876b0c632dbSHeiko Carstens 
877e08b9637SCarsten Otte 	rc = -EINVAL;
878e08b9637SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
879e08b9637SCarsten Otte 	if (type & ~KVM_VM_S390_UCONTROL)
880e08b9637SCarsten Otte 		goto out_err;
881e08b9637SCarsten Otte 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
882e08b9637SCarsten Otte 		goto out_err;
883e08b9637SCarsten Otte #else
884e08b9637SCarsten Otte 	if (type)
885e08b9637SCarsten Otte 		goto out_err;
886e08b9637SCarsten Otte #endif
887e08b9637SCarsten Otte 
888b0c632dbSHeiko Carstens 	rc = s390_enable_sie();
889b0c632dbSHeiko Carstens 	if (rc)
890d89f5effSJan Kiszka 		goto out_err;
891b0c632dbSHeiko Carstens 
892b290411aSCarsten Otte 	rc = -ENOMEM;
893b290411aSCarsten Otte 
894b0c632dbSHeiko Carstens 	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
895b0c632dbSHeiko Carstens 	if (!kvm->arch.sca)
896d89f5effSJan Kiszka 		goto out_err;
897f6c137ffSChristian Borntraeger 	spin_lock(&kvm_lock);
898f6c137ffSChristian Borntraeger 	sca_offset = (sca_offset + 16) & 0x7f0;
899f6c137ffSChristian Borntraeger 	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
900f6c137ffSChristian Borntraeger 	spin_unlock(&kvm_lock);
901b0c632dbSHeiko Carstens 
902b0c632dbSHeiko Carstens 	sprintf(debug_name, "kvm-%u", current->pid);
903b0c632dbSHeiko Carstens 
904b0c632dbSHeiko Carstens 	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
905b0c632dbSHeiko Carstens 	if (!kvm->arch.dbf)
90640f5b735SDominik Dingel 		goto out_err;
907b0c632dbSHeiko Carstens 
9089d8d5786SMichael Mueller 	/*
9099d8d5786SMichael Mueller 	 * The architectural maximum number of facility bits is 16 kbit, which
9109d8d5786SMichael Mueller 	 * needs 2 kbyte of storage. Thus one full page is used to hold the
911981467c9SMichael Mueller 	 * guest facility list (arch.model.fac->list) and the facility mask
912981467c9SMichael Mueller 	 * (arch.model.fac->mask). The page address has to fit into 31 bits
9139d8d5786SMichael Mueller 	 * and be word aligned.
9149d8d5786SMichael Mueller 	 */
9159d8d5786SMichael Mueller 	kvm->arch.model.fac =
916981467c9SMichael Mueller 		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
9179d8d5786SMichael Mueller 	if (!kvm->arch.model.fac)
91840f5b735SDominik Dingel 		goto out_err;
9199d8d5786SMichael Mueller 
920fb5bf93fSMichael Mueller 	/* Populate the facility mask initially. */
921981467c9SMichael Mueller 	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
92294422ee8SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
9239d8d5786SMichael Mueller 	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
9249d8d5786SMichael Mueller 		if (i < kvm_s390_fac_list_mask_size())
925981467c9SMichael Mueller 			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
9269d8d5786SMichael Mueller 		else
927981467c9SMichael Mueller 			kvm->arch.model.fac->mask[i] = 0UL;
9289d8d5786SMichael Mueller 	}
9299d8d5786SMichael Mueller 
930981467c9SMichael Mueller 	/* Populate the facility list initially. */
931981467c9SMichael Mueller 	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
932981467c9SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
933981467c9SMichael Mueller 
9349d8d5786SMichael Mueller 	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
935658b6edaSMichael Mueller 	kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;
9369d8d5786SMichael Mueller 
9375102ee87STony Krowiak 	if (kvm_s390_crypto_init(kvm) < 0)
93840f5b735SDominik Dingel 		goto out_err;
9395102ee87STony Krowiak 
940ba5c1e9bSCarsten Otte 	spin_lock_init(&kvm->arch.float_int.lock);
941ba5c1e9bSCarsten Otte 	INIT_LIST_HEAD(&kvm->arch.float_int.list);
9428a242234SHeiko Carstens 	init_waitqueue_head(&kvm->arch.ipte_wq);
943a6b7e459SThomas Huth 	mutex_init(&kvm->arch.ipte_mutex);
944ba5c1e9bSCarsten Otte 
945b0c632dbSHeiko Carstens 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
946b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "%s", "vm created");
947b0c632dbSHeiko Carstens 
948e08b9637SCarsten Otte 	if (type & KVM_VM_S390_UCONTROL) {
949e08b9637SCarsten Otte 		kvm->arch.gmap = NULL;
950e08b9637SCarsten Otte 	} else {
9510349985aSChristian Borntraeger 		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
952598841caSCarsten Otte 		if (!kvm->arch.gmap)
95340f5b735SDominik Dingel 			goto out_err;
9542c70fe44SChristian Borntraeger 		kvm->arch.gmap->private = kvm;
95524eb3a82SDominik Dingel 		kvm->arch.gmap->pfault_enabled = 0;
956e08b9637SCarsten Otte 	}
957fa6b7fe9SCornelia Huck 
958fa6b7fe9SCornelia Huck 	kvm->arch.css_support = 0;
95984223598SCornelia Huck 	kvm->arch.use_irqchip = 0;
96068c55750SEric Farman 	kvm->arch.use_vectors = 0;
96172f25020SJason J. Herne 	kvm->arch.epoch = 0;
962fa6b7fe9SCornelia Huck 
9638ad35755SDavid Hildenbrand 	spin_lock_init(&kvm->arch.start_stop_lock);
9648ad35755SDavid Hildenbrand 
965d89f5effSJan Kiszka 	return 0;
966d89f5effSJan Kiszka out_err:
96740f5b735SDominik Dingel 	kfree(kvm->arch.crypto.crycb);
96840f5b735SDominik Dingel 	free_page((unsigned long)kvm->arch.model.fac);
96940f5b735SDominik Dingel 	debug_unregister(kvm->arch.dbf);
97040f5b735SDominik Dingel 	free_page((unsigned long)(kvm->arch.sca));
971d89f5effSJan Kiszka 	return rc;
972b0c632dbSHeiko Carstens }
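
The type argument checked at the top of kvm_arch_init_vm() comes straight from the KVM_CREATE_VM ioctl: 0 selects a regular guest, KVM_VM_S390_UCONTROL a user-controlled one (which additionally requires CAP_SYS_ADMIN and a kernel built with CONFIG_KVM_S390_UCONTROL). A creation sketch (fd names are illustrative):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Create a regular (non-ucontrol) s390 guest; returns the VM fd or -1. */
static int create_s390_vm(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	int vm_fd;

	if (kvm_fd < 0)
		return -1;
	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);	/* 0 = regular guest */
	/* use KVM_VM_S390_UCONTROL instead of 0 for a ucontrol guest */
	return vm_fd;
}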
973b0c632dbSHeiko Carstens 
974d329c035SChristian Borntraeger void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
975d329c035SChristian Borntraeger {
976d329c035SChristian Borntraeger 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
977ade38c31SCornelia Huck 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
97867335e63SChristian Borntraeger 	kvm_s390_clear_local_irqs(vcpu);
9793c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
98058f9460bSCarsten Otte 	if (!kvm_is_ucontrol(vcpu->kvm)) {
98158f9460bSCarsten Otte 		clear_bit(63 - vcpu->vcpu_id,
98258f9460bSCarsten Otte 			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
983abf4a71eSCarsten Otte 		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
984abf4a71eSCarsten Otte 		    (__u64) vcpu->arch.sie_block)
985abf4a71eSCarsten Otte 			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
98658f9460bSCarsten Otte 	}
987abf4a71eSCarsten Otte 	smp_mb();
98827e0393fSCarsten Otte 
98927e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm))
99027e0393fSCarsten Otte 		gmap_free(vcpu->arch.gmap);
99127e0393fSCarsten Otte 
992b31605c1SDominik Dingel 	if (kvm_s390_cmma_enabled(vcpu->kvm))
993b31605c1SDominik Dingel 		kvm_s390_vcpu_unsetup_cmma(vcpu);
994d329c035SChristian Borntraeger 	free_page((unsigned long)(vcpu->arch.sie_block));
995b31288faSKonstantin Weitz 
9966692cef3SChristian Borntraeger 	kvm_vcpu_uninit(vcpu);
997b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
998d329c035SChristian Borntraeger }
999d329c035SChristian Borntraeger 
1000d329c035SChristian Borntraeger static void kvm_free_vcpus(struct kvm *kvm)
1001d329c035SChristian Borntraeger {
1002d329c035SChristian Borntraeger 	unsigned int i;
1003988a2caeSGleb Natapov 	struct kvm_vcpu *vcpu;
1004d329c035SChristian Borntraeger 
1005988a2caeSGleb Natapov 	kvm_for_each_vcpu(i, vcpu, kvm)
1006988a2caeSGleb Natapov 		kvm_arch_vcpu_destroy(vcpu);
1007988a2caeSGleb Natapov 
1008988a2caeSGleb Natapov 	mutex_lock(&kvm->lock);
1009988a2caeSGleb Natapov 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
1010d329c035SChristian Borntraeger 		kvm->vcpus[i] = NULL;
1011988a2caeSGleb Natapov 
1012988a2caeSGleb Natapov 	atomic_set(&kvm->online_vcpus, 0);
1013988a2caeSGleb Natapov 	mutex_unlock(&kvm->lock);
1014d329c035SChristian Borntraeger }
1015d329c035SChristian Borntraeger 
1016b0c632dbSHeiko Carstens void kvm_arch_destroy_vm(struct kvm *kvm)
1017b0c632dbSHeiko Carstens {
1018d329c035SChristian Borntraeger 	kvm_free_vcpus(kvm);
10199d8d5786SMichael Mueller 	free_page((unsigned long)kvm->arch.model.fac);
1020b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
1021d329c035SChristian Borntraeger 	debug_unregister(kvm->arch.dbf);
10225102ee87STony Krowiak 	kfree(kvm->arch.crypto.crycb);
102327e0393fSCarsten Otte 	if (!kvm_is_ucontrol(kvm))
1024598841caSCarsten Otte 		gmap_free(kvm->arch.gmap);
1025841b91c5SCornelia Huck 	kvm_s390_destroy_adapters(kvm);
102667335e63SChristian Borntraeger 	kvm_s390_clear_float_irqs(kvm);
1027b0c632dbSHeiko Carstens }
1028b0c632dbSHeiko Carstens 
1029b0c632dbSHeiko Carstens /* Section: vcpu related */
1030dafd032aSDominik Dingel static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1031b0c632dbSHeiko Carstens {
1032c6c956b8SMartin Schwidefsky 	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
103327e0393fSCarsten Otte 	if (!vcpu->arch.gmap)
103427e0393fSCarsten Otte 		return -ENOMEM;
10352c70fe44SChristian Borntraeger 	vcpu->arch.gmap->private = vcpu->kvm;
1036dafd032aSDominik Dingel 
103727e0393fSCarsten Otte 	return 0;
103827e0393fSCarsten Otte }
103927e0393fSCarsten Otte 
1040dafd032aSDominik Dingel int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1041dafd032aSDominik Dingel {
1042dafd032aSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1043dafd032aSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
104459674c1aSChristian Borntraeger 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
104559674c1aSChristian Borntraeger 				    KVM_SYNC_GPRS |
10469eed0735SChristian Borntraeger 				    KVM_SYNC_ACRS |
1047b028ee3eSDavid Hildenbrand 				    KVM_SYNC_CRS |
1048b028ee3eSDavid Hildenbrand 				    KVM_SYNC_ARCH0 |
1049b028ee3eSDavid Hildenbrand 				    KVM_SYNC_PFAULT;
105068c55750SEric Farman 	if (test_kvm_facility(vcpu->kvm, 129))
105168c55750SEric Farman 		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
1052dafd032aSDominik Dingel 
1053dafd032aSDominik Dingel 	if (kvm_is_ucontrol(vcpu->kvm))
1054dafd032aSDominik Dingel 		return __kvm_ucontrol_vcpu_init(vcpu);
1055dafd032aSDominik Dingel 
1056b0c632dbSHeiko Carstens 	return 0;
1057b0c632dbSHeiko Carstens }
1058b0c632dbSHeiko Carstens 
1059b0c632dbSHeiko Carstens void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1060b0c632dbSHeiko Carstens {
10614725c860SMartin Schwidefsky 	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
106268c55750SEric Farman 	if (vcpu->kvm->arch.use_vectors)
106368c55750SEric Farman 		save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
106468c55750SEric Farman 	else
10654725c860SMartin Schwidefsky 		save_fp_regs(vcpu->arch.host_fpregs.fprs);
1066b0c632dbSHeiko Carstens 	save_access_regs(vcpu->arch.host_acrs);
106768c55750SEric Farman 	if (vcpu->kvm->arch.use_vectors) {
106868c55750SEric Farman 		restore_fp_ctl(&vcpu->run->s.regs.fpc);
106968c55750SEric Farman 		restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
107068c55750SEric Farman 	} else {
10714725c860SMartin Schwidefsky 		restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
10724725c860SMartin Schwidefsky 		restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
107368c55750SEric Farman 	}
107459674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
1075480e5926SChristian Borntraeger 	gmap_enable(vcpu->arch.gmap);
10769e6dabefSCornelia Huck 	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
1077b0c632dbSHeiko Carstens }
1078b0c632dbSHeiko Carstens 
1079b0c632dbSHeiko Carstens void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1080b0c632dbSHeiko Carstens {
10819e6dabefSCornelia Huck 	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
1082480e5926SChristian Borntraeger 	gmap_disable(vcpu->arch.gmap);
108368c55750SEric Farman 	if (vcpu->kvm->arch.use_vectors) {
108468c55750SEric Farman 		save_fp_ctl(&vcpu->run->s.regs.fpc);
108568c55750SEric Farman 		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
108668c55750SEric Farman 	} else {
10874725c860SMartin Schwidefsky 		save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
10884725c860SMartin Schwidefsky 		save_fp_regs(vcpu->arch.guest_fpregs.fprs);
108968c55750SEric Farman 	}
109059674c1aSChristian Borntraeger 	save_access_regs(vcpu->run->s.regs.acrs);
10914725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
109268c55750SEric Farman 	if (vcpu->kvm->arch.use_vectors)
109368c55750SEric Farman 		restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
109468c55750SEric Farman 	else
10954725c860SMartin Schwidefsky 		restore_fp_regs(vcpu->arch.host_fpregs.fprs);
1096b0c632dbSHeiko Carstens 	restore_access_regs(vcpu->arch.host_acrs);
1097b0c632dbSHeiko Carstens }
1098b0c632dbSHeiko Carstens 
1099b0c632dbSHeiko Carstens static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
1100b0c632dbSHeiko Carstens {
1101b0c632dbSHeiko Carstens 	/* this equals initial cpu reset in pop, but we don't switch to ESA */
1102b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.mask = 0UL;
1103b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.addr = 0UL;
11048d26cf7bSChristian Borntraeger 	kvm_s390_set_prefix(vcpu, 0);
1105b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->cputm     = 0UL;
1106b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->ckc       = 0UL;
1107b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->todpr     = 0;
1108b0c632dbSHeiko Carstens 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
1109b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
1110b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
1111b0c632dbSHeiko Carstens 	vcpu->arch.guest_fpregs.fpc = 0;
1112b0c632dbSHeiko Carstens 	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
1113b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gbea = 1;
1114672550fbSChristian Borntraeger 	vcpu->arch.sie_block->pp = 0;
11153c038e6bSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
11163c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
11176352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
11186852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
11192ed10cc1SJens Freimann 	kvm_s390_clear_local_irqs(vcpu);
1120b0c632dbSHeiko Carstens }
1121b0c632dbSHeiko Carstens 
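/*
 * Finish vcpu creation after the generic KVM code is done: take over the
 * VM-wide TOD epoch and, unless this is a user-controlled VM, share the
 * VM's gmap.
 */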
112231928aa5SDominik Dingel void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
112342897d86SMarcelo Tosatti {
112472f25020SJason J. Herne 	mutex_lock(&vcpu->kvm->lock);
112572f25020SJason J. Herne 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
112672f25020SJason J. Herne 	mutex_unlock(&vcpu->kvm->lock);
1127dafd032aSDominik Dingel 	if (!kvm_is_ucontrol(vcpu->kvm))
1128dafd032aSDominik Dingel 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
112942897d86SMarcelo Tosatti }
113042897d86SMarcelo Tosatti 
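/*
 * Program the per-vcpu crypto controls: only done if facility 76 is
 * available; set the AES/DEA key-wrapping bits in ECB3 according to the
 * VM-wide settings and point the SIE block at the VM's crypto control
 * block (crycbd).
 */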
11315102ee87STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
11325102ee87STony Krowiak {
11339d8d5786SMichael Mueller 	if (!test_kvm_facility(vcpu->kvm, 76))
11345102ee87STony Krowiak 		return;
11355102ee87STony Krowiak 
1136a374e892STony Krowiak 	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1137a374e892STony Krowiak 
1138a374e892STony Krowiak 	if (vcpu->kvm->arch.crypto.aes_kw)
1139a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1140a374e892STony Krowiak 	if (vcpu->kvm->arch.crypto.dea_kw)
1141a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1142a374e892STony Krowiak 
11435102ee87STony Krowiak 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
11445102ee87STony Krowiak }
11455102ee87STony Krowiak 
1146b31605c1SDominik Dingel void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1147b31605c1SDominik Dingel {
1148b31605c1SDominik Dingel 	free_page(vcpu->arch.sie_block->cbrlo);
1149b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = 0;
1150b31605c1SDominik Dingel }
1151b31605c1SDominik Dingel 
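/*
 * CMMA needs a zeroed page that the SIE block's cbrlo field points to;
 * allocate it here and adjust the ECB2 control bits accordingly. The
 * unsetup variant above releases that page again.
 */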
1152b31605c1SDominik Dingel int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1153b31605c1SDominik Dingel {
1154b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1155b31605c1SDominik Dingel 	if (!vcpu->arch.sie_block->cbrlo)
1156b31605c1SDominik Dingel 		return -ENOMEM;
1157b31605c1SDominik Dingel 
1158b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 |= 0x80;
1159b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 &= ~0x08;
1160b31605c1SDominik Dingel 	return 0;
1161b31605c1SDominik Dingel }
1162b31605c1SDominik Dingel 
116391520f1aSMichael Mueller static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
116491520f1aSMichael Mueller {
116591520f1aSMichael Mueller 	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
116691520f1aSMichael Mueller 
116791520f1aSMichael Mueller 	vcpu->arch.cpu_id = model->cpu_id;
116891520f1aSMichael Mueller 	vcpu->arch.sie_block->ibc = model->ibc;
116991520f1aSMichael Mueller 	vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
117091520f1aSMichael Mueller }
117191520f1aSMichael Mueller 
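/*
 * Initialize the SIE control block for this vcpu: set the initial CPU
 * state flags, enable the execution controls the machine offers (SIIF,
 * SIGP interpretation, vector support, ...), optionally set up CMMA,
 * arm the clock comparator timer and program the crypto controls.
 */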
1172b0c632dbSHeiko Carstens int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1173b0c632dbSHeiko Carstens {
1174b31605c1SDominik Dingel 	int rc = 0;
1175b31288faSKonstantin Weitz 
11769e6dabefSCornelia Huck 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
11779e6dabefSCornelia Huck 						    CPUSTAT_SM |
117869d0d3a3SChristian Borntraeger 						    CPUSTAT_STOPPED |
117969d0d3a3SChristian Borntraeger 						    CPUSTAT_GED);
118091520f1aSMichael Mueller 	kvm_s390_vcpu_setup_model(vcpu);
118191520f1aSMichael Mueller 
1182fc34531dSChristian Borntraeger 	vcpu->arch.sie_block->ecb   = 6;
11839d8d5786SMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
11847feb6bb8SMichael Mueller 		vcpu->arch.sie_block->ecb |= 0x10;
11857feb6bb8SMichael Mueller 
118669d0d3a3SChristian Borntraeger 	vcpu->arch.sie_block->ecb2  = 8;
1187ea5f4969SDavid Hildenbrand 	vcpu->arch.sie_block->eca   = 0xC1002000U;
1188217a4406SHeiko Carstens 	if (sclp_has_siif())
1189217a4406SHeiko Carstens 		vcpu->arch.sie_block->eca |= 1;
1190ea5f4969SDavid Hildenbrand 	if (sclp_has_sigpif())
1191ea5f4969SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= 0x10000000U;
119213211ea7SEric Farman 	if (vcpu->kvm->arch.use_vectors) {
119313211ea7SEric Farman 		vcpu->arch.sie_block->eca |= 0x00020000;
119413211ea7SEric Farman 		vcpu->arch.sie_block->ecd |= 0x20000000;
119513211ea7SEric Farman 	}
1196492d8642SThomas Huth 	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
11975a5e6536SMatthew Rosato 
1198b31605c1SDominik Dingel 	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
1199b31605c1SDominik Dingel 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
1200b31605c1SDominik Dingel 		if (rc)
1201b31605c1SDominik Dingel 			return rc;
1202b31288faSKonstantin Weitz 	}
12030ac96cafSDavid Hildenbrand 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1204ca872302SChristian Borntraeger 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
12059d8d5786SMichael Mueller 
12065102ee87STony Krowiak 	kvm_s390_vcpu_crypto_setup(vcpu);
12075102ee87STony Krowiak 
1208b31605c1SDominik Dingel 	return rc;
1209b0c632dbSHeiko Carstens }
1210b0c632dbSHeiko Carstens 
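/*
 * Allocate and wire up a new vcpu: a zeroed sie_page provides the SIE
 * control block, the ITDB and the host vector save area; for
 * non-ucontrol VMs the vcpu is also entered into the shared SCA.
 */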
1211b0c632dbSHeiko Carstens struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1212b0c632dbSHeiko Carstens 				      unsigned int id)
1213b0c632dbSHeiko Carstens {
12144d47555aSCarsten Otte 	struct kvm_vcpu *vcpu;
12157feb6bb8SMichael Mueller 	struct sie_page *sie_page;
12164d47555aSCarsten Otte 	int rc = -EINVAL;
1217b0c632dbSHeiko Carstens 
12184d47555aSCarsten Otte 	if (id >= KVM_MAX_VCPUS)
12194d47555aSCarsten Otte 		goto out;
12204d47555aSCarsten Otte 
12214d47555aSCarsten Otte 	rc = -ENOMEM;
12224d47555aSCarsten Otte 
1223b110feafSMichael Mueller 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
1224b0c632dbSHeiko Carstens 	if (!vcpu)
12254d47555aSCarsten Otte 		goto out;
1226b0c632dbSHeiko Carstens 
12277feb6bb8SMichael Mueller 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
12287feb6bb8SMichael Mueller 	if (!sie_page)
1229b0c632dbSHeiko Carstens 		goto out_free_cpu;
1230b0c632dbSHeiko Carstens 
12317feb6bb8SMichael Mueller 	vcpu->arch.sie_block = &sie_page->sie_block;
12327feb6bb8SMichael Mueller 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
123368c55750SEric Farman 	vcpu->arch.host_vregs = &sie_page->vregs;
12347feb6bb8SMichael Mueller 
1235b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icpua = id;
123658f9460bSCarsten Otte 	if (!kvm_is_ucontrol(kvm)) {
123758f9460bSCarsten Otte 		if (!kvm->arch.sca) {
123858f9460bSCarsten Otte 			WARN_ON_ONCE(1);
123958f9460bSCarsten Otte 			goto out_free_cpu;
124058f9460bSCarsten Otte 		}
1241abf4a71eSCarsten Otte 		if (!kvm->arch.sca->cpu[id].sda)
124258f9460bSCarsten Otte 			kvm->arch.sca->cpu[id].sda =
124358f9460bSCarsten Otte 				(__u64) vcpu->arch.sie_block;
124458f9460bSCarsten Otte 		vcpu->arch.sie_block->scaoh =
124558f9460bSCarsten Otte 			(__u32)(((__u64)kvm->arch.sca) >> 32);
1246b0c632dbSHeiko Carstens 		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
1247fc34531dSChristian Borntraeger 		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
124858f9460bSCarsten Otte 	}
1249b0c632dbSHeiko Carstens 
1250ba5c1e9bSCarsten Otte 	spin_lock_init(&vcpu->arch.local_int.lock);
1251ba5c1e9bSCarsten Otte 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
1252d0321a24SChristian Borntraeger 	vcpu->arch.local_int.wq = &vcpu->wq;
12535288fbf0SChristian Borntraeger 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
1254ba5c1e9bSCarsten Otte 
1255b0c632dbSHeiko Carstens 	rc = kvm_vcpu_init(vcpu, kvm, id);
1256b0c632dbSHeiko Carstens 	if (rc)
12577b06bf2fSWei Yongjun 		goto out_free_sie_block;
1258b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
1259b0c632dbSHeiko Carstens 		 vcpu->arch.sie_block);
1260ade38c31SCornelia Huck 	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
1261b0c632dbSHeiko Carstens 
1262b0c632dbSHeiko Carstens 	return vcpu;
12637b06bf2fSWei Yongjun out_free_sie_block:
12647b06bf2fSWei Yongjun 	free_page((unsigned long)(vcpu->arch.sie_block));
1265b0c632dbSHeiko Carstens out_free_cpu:
1266b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
12674d47555aSCarsten Otte out:
1268b0c632dbSHeiko Carstens 	return ERR_PTR(rc);
1269b0c632dbSHeiko Carstens }
1270b0c632dbSHeiko Carstens 
1271b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1272b0c632dbSHeiko Carstens {
12739a022067SDavid Hildenbrand 	return kvm_s390_vcpu_has_irq(vcpu, 0);
1274b0c632dbSHeiko Carstens }
1275b0c632dbSHeiko Carstens 
127649b99e1eSChristian Borntraeger void s390_vcpu_block(struct kvm_vcpu *vcpu)
127749b99e1eSChristian Borntraeger {
127849b99e1eSChristian Borntraeger 	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
127949b99e1eSChristian Borntraeger }
128049b99e1eSChristian Borntraeger 
128149b99e1eSChristian Borntraeger void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
128249b99e1eSChristian Borntraeger {
128349b99e1eSChristian Borntraeger 	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
128449b99e1eSChristian Borntraeger }
128549b99e1eSChristian Borntraeger 
128649b99e1eSChristian Borntraeger /*
128749b99e1eSChristian Borntraeger  * Kick a guest cpu out of SIE and wait until SIE is not running.
128849b99e1eSChristian Borntraeger  * If the CPU is not running (e.g. waiting as idle) the function will
128949b99e1eSChristian Borntraeger  * return immediately. */
129049b99e1eSChristian Borntraeger void exit_sie(struct kvm_vcpu *vcpu)
129149b99e1eSChristian Borntraeger {
129249b99e1eSChristian Borntraeger 	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
129349b99e1eSChristian Borntraeger 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
129449b99e1eSChristian Borntraeger 		cpu_relax();
129549b99e1eSChristian Borntraeger }
129649b99e1eSChristian Borntraeger 
129749b99e1eSChristian Borntraeger /* Kick a guest cpu out of SIE and prevent SIE-reentry */
129849b99e1eSChristian Borntraeger void exit_sie_sync(struct kvm_vcpu *vcpu)
129949b99e1eSChristian Borntraeger {
130049b99e1eSChristian Borntraeger 	s390_vcpu_block(vcpu);
130149b99e1eSChristian Borntraeger 	exit_sie(vcpu);
130249b99e1eSChristian Borntraeger }
130349b99e1eSChristian Borntraeger 
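/*
 * gmap notifier callback: invoked when a notified guest mapping is
 * invalidated. Any vcpu whose prefix pages are affected gets an MMU
 * reload request and is kicked out of SIE so the request takes effect.
 */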
13042c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
13052c70fe44SChristian Borntraeger {
13062c70fe44SChristian Borntraeger 	int i;
13072c70fe44SChristian Borntraeger 	struct kvm *kvm = gmap->private;
13082c70fe44SChristian Borntraeger 	struct kvm_vcpu *vcpu;
13092c70fe44SChristian Borntraeger 
13102c70fe44SChristian Borntraeger 	kvm_for_each_vcpu(i, vcpu, kvm) {
13112c70fe44SChristian Borntraeger 		/* match against both prefix pages */
1312fda902cbSMichael Mueller 		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
13132c70fe44SChristian Borntraeger 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
13142c70fe44SChristian Borntraeger 			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
13152c70fe44SChristian Borntraeger 			exit_sie_sync(vcpu);
13162c70fe44SChristian Borntraeger 		}
13172c70fe44SChristian Borntraeger 	}
13182c70fe44SChristian Borntraeger }
13192c70fe44SChristian Borntraeger 
1320b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1321b6d33834SChristoffer Dall {
1322b6d33834SChristoffer Dall 	/* kvm common code refers to this, but never calls it */
1323b6d33834SChristoffer Dall 	BUG();
1324b6d33834SChristoffer Dall 	return 0;
1325b6d33834SChristoffer Dall }
1326b6d33834SChristoffer Dall 
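/*
 * KVM_GET_ONE_REG / KVM_SET_ONE_REG backends for the s390-specific
 * registers: TOD programmable register, epoch difference, CPU timer,
 * clock comparator, pfault parameters, program parameter and GBEA.
 */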
132714eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
132814eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
132914eebd91SCarsten Otte {
133014eebd91SCarsten Otte 	int r = -EINVAL;
133114eebd91SCarsten Otte 
133214eebd91SCarsten Otte 	switch (reg->id) {
133329b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
133429b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->todpr,
133529b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
133629b7c71bSCarsten Otte 		break;
133729b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
133829b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->epoch,
133929b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
134029b7c71bSCarsten Otte 		break;
134146a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
134246a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->cputm,
134346a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
134446a6dd1cSJason J. herne 		break;
134546a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
134646a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->ckc,
134746a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
134846a6dd1cSJason J. herne 		break;
1349536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
1350536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_token,
1351536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1352536336c2SDominik Dingel 		break;
1353536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
1354536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_compare,
1355536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1356536336c2SDominik Dingel 		break;
1357536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
1358536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_select,
1359536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1360536336c2SDominik Dingel 		break;
1361672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
1362672550fbSChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->pp,
1363672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
1364672550fbSChristian Borntraeger 		break;
1365afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
1366afa45ff5SChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->gbea,
1367afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
1368afa45ff5SChristian Borntraeger 		break;
136914eebd91SCarsten Otte 	default:
137014eebd91SCarsten Otte 		break;
137114eebd91SCarsten Otte 	}
137214eebd91SCarsten Otte 
137314eebd91SCarsten Otte 	return r;
137414eebd91SCarsten Otte }
137514eebd91SCarsten Otte 
137614eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
137714eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
137814eebd91SCarsten Otte {
137914eebd91SCarsten Otte 	int r = -EINVAL;
138014eebd91SCarsten Otte 
138114eebd91SCarsten Otte 	switch (reg->id) {
138229b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
138329b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->todpr,
138429b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
138529b7c71bSCarsten Otte 		break;
138629b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
138729b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->epoch,
138829b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
138929b7c71bSCarsten Otte 		break;
139046a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
139146a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->cputm,
139246a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
139346a6dd1cSJason J. herne 		break;
139446a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
139546a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->ckc,
139646a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
139746a6dd1cSJason J. herne 		break;
1398536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
1399536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_token,
1400536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
14019fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
14029fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
1403536336c2SDominik Dingel 		break;
1404536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
1405536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_compare,
1406536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1407536336c2SDominik Dingel 		break;
1408536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
1409536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_select,
1410536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1411536336c2SDominik Dingel 		break;
1412672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
1413672550fbSChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->pp,
1414672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
1415672550fbSChristian Borntraeger 		break;
1416afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
1417afa45ff5SChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->gbea,
1418afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
1419afa45ff5SChristian Borntraeger 		break;
142014eebd91SCarsten Otte 	default:
142114eebd91SCarsten Otte 		break;
142214eebd91SCarsten Otte 	}
142314eebd91SCarsten Otte 
142414eebd91SCarsten Otte 	return r;
142514eebd91SCarsten Otte }
1426b6d33834SChristoffer Dall 
1427b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
1428b0c632dbSHeiko Carstens {
1429b0c632dbSHeiko Carstens 	kvm_s390_vcpu_initial_reset(vcpu);
1430b0c632dbSHeiko Carstens 	return 0;
1431b0c632dbSHeiko Carstens }
1432b0c632dbSHeiko Carstens 
1433b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1434b0c632dbSHeiko Carstens {
14355a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
1436b0c632dbSHeiko Carstens 	return 0;
1437b0c632dbSHeiko Carstens }
1438b0c632dbSHeiko Carstens 
1439b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1440b0c632dbSHeiko Carstens {
14415a32c1afSChristian Borntraeger 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
1442b0c632dbSHeiko Carstens 	return 0;
1443b0c632dbSHeiko Carstens }
1444b0c632dbSHeiko Carstens 
1445b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1446b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
1447b0c632dbSHeiko Carstens {
144859674c1aSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
1449b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
145059674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
1451b0c632dbSHeiko Carstens 	return 0;
1452b0c632dbSHeiko Carstens }
1453b0c632dbSHeiko Carstens 
1454b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1455b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
1456b0c632dbSHeiko Carstens {
145759674c1aSChristian Borntraeger 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
1458b0c632dbSHeiko Carstens 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
1459b0c632dbSHeiko Carstens 	return 0;
1460b0c632dbSHeiko Carstens }
1461b0c632dbSHeiko Carstens 
1462b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1463b0c632dbSHeiko Carstens {
14644725c860SMartin Schwidefsky 	if (test_fp_ctl(fpu->fpc))
14654725c860SMartin Schwidefsky 		return -EINVAL;
1466b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
14674725c860SMartin Schwidefsky 	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
14684725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
14694725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
1470b0c632dbSHeiko Carstens 	return 0;
1471b0c632dbSHeiko Carstens }
1472b0c632dbSHeiko Carstens 
1473b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1474b0c632dbSHeiko Carstens {
1475b0c632dbSHeiko Carstens 	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
1476b0c632dbSHeiko Carstens 	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
1477b0c632dbSHeiko Carstens 	return 0;
1478b0c632dbSHeiko Carstens }
1479b0c632dbSHeiko Carstens 
1480b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1481b0c632dbSHeiko Carstens {
1482b0c632dbSHeiko Carstens 	int rc = 0;
1483b0c632dbSHeiko Carstens 
14847a42fdc2SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
1485b0c632dbSHeiko Carstens 		rc = -EBUSY;
1486d7b0b5ebSCarsten Otte 	else {
1487d7b0b5ebSCarsten Otte 		vcpu->run->psw_mask = psw.mask;
1488d7b0b5ebSCarsten Otte 		vcpu->run->psw_addr = psw.addr;
1489d7b0b5ebSCarsten Otte 	}
1490b0c632dbSHeiko Carstens 	return rc;
1491b0c632dbSHeiko Carstens }
1492b0c632dbSHeiko Carstens 
1493b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1494b0c632dbSHeiko Carstens 				  struct kvm_translation *tr)
1495b0c632dbSHeiko Carstens {
1496b0c632dbSHeiko Carstens 	return -EINVAL; /* not implemented yet */
1497b0c632dbSHeiko Carstens }
1498b0c632dbSHeiko Carstens 
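/*
 * Guest debug support: enabling it forces PER on for the guest via the
 * CPUSTAT_P flag and, if requested, imports hardware breakpoint data;
 * disabling it clears all of that again.
 */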
149927291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
150027291e21SDavid Hildenbrand 			      KVM_GUESTDBG_USE_HW_BP | \
150127291e21SDavid Hildenbrand 			      KVM_GUESTDBG_ENABLE)
150227291e21SDavid Hildenbrand 
1503d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1504d0bfb940SJan Kiszka 					struct kvm_guest_debug *dbg)
1505b0c632dbSHeiko Carstens {
150627291e21SDavid Hildenbrand 	int rc = 0;
150727291e21SDavid Hildenbrand 
150827291e21SDavid Hildenbrand 	vcpu->guest_debug = 0;
150927291e21SDavid Hildenbrand 	kvm_s390_clear_bp_data(vcpu);
151027291e21SDavid Hildenbrand 
15112de3bfc2SDavid Hildenbrand 	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
151227291e21SDavid Hildenbrand 		return -EINVAL;
151327291e21SDavid Hildenbrand 
151427291e21SDavid Hildenbrand 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
151527291e21SDavid Hildenbrand 		vcpu->guest_debug = dbg->control;
151627291e21SDavid Hildenbrand 		/* enforce guest PER */
151727291e21SDavid Hildenbrand 		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
151827291e21SDavid Hildenbrand 
151927291e21SDavid Hildenbrand 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
152027291e21SDavid Hildenbrand 			rc = kvm_s390_import_bp_data(vcpu, dbg);
152127291e21SDavid Hildenbrand 	} else {
152227291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
152327291e21SDavid Hildenbrand 		vcpu->arch.guestdbg.last_bp = 0;
152427291e21SDavid Hildenbrand 	}
152527291e21SDavid Hildenbrand 
152627291e21SDavid Hildenbrand 	if (rc) {
152727291e21SDavid Hildenbrand 		vcpu->guest_debug = 0;
152827291e21SDavid Hildenbrand 		kvm_s390_clear_bp_data(vcpu);
152927291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
153027291e21SDavid Hildenbrand 	}
153127291e21SDavid Hildenbrand 
153227291e21SDavid Hildenbrand 	return rc;
1533b0c632dbSHeiko Carstens }
1534b0c632dbSHeiko Carstens 
153562d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
153662d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
153762d9f0dbSMarcelo Tosatti {
15386352e4d2SDavid Hildenbrand 	/* CHECK_STOP and LOAD are not supported yet */
15396352e4d2SDavid Hildenbrand 	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
15406352e4d2SDavid Hildenbrand 				       KVM_MP_STATE_OPERATING;
154162d9f0dbSMarcelo Tosatti }
154262d9f0dbSMarcelo Tosatti 
154362d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
154462d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
154562d9f0dbSMarcelo Tosatti {
15466352e4d2SDavid Hildenbrand 	int rc = 0;
15476352e4d2SDavid Hildenbrand 
15486352e4d2SDavid Hildenbrand 	/* user space knows about this interface - let it control the state */
15496352e4d2SDavid Hildenbrand 	vcpu->kvm->arch.user_cpu_state_ctrl = 1;
15506352e4d2SDavid Hildenbrand 
15516352e4d2SDavid Hildenbrand 	switch (mp_state->mp_state) {
15526352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_STOPPED:
15536352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
15546352e4d2SDavid Hildenbrand 		break;
15556352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_OPERATING:
15566352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
15576352e4d2SDavid Hildenbrand 		break;
15586352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_LOAD:
15596352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_CHECK_STOP:
15606352e4d2SDavid Hildenbrand 		/* fall through - CHECK_STOP and LOAD are not supported yet */
15616352e4d2SDavid Hildenbrand 	default:
15626352e4d2SDavid Hildenbrand 		rc = -ENXIO;
15636352e4d2SDavid Hildenbrand 	}
15646352e4d2SDavid Hildenbrand 
15656352e4d2SDavid Hildenbrand 	return rc;
156662d9f0dbSMarcelo Tosatti }
156762d9f0dbSMarcelo Tosatti 
1568b31605c1SDominik Dingel bool kvm_s390_cmma_enabled(struct kvm *kvm)
1569b31605c1SDominik Dingel {
1570b31605c1SDominik Dingel 	if (!MACHINE_IS_LPAR)
1571b31605c1SDominik Dingel 		return false;
1572b31605c1SDominik Dingel 	/* only enable for z10 and later */
1573b31605c1SDominik Dingel 	if (!MACHINE_HAS_EDAT1)
1574b31605c1SDominik Dingel 		return false;
1575b31605c1SDominik Dingel 	if (!kvm->arch.use_cmma)
1576b31605c1SDominik Dingel 		return false;
1577b31605c1SDominik Dingel 	return true;
1578b31605c1SDominik Dingel }
1579b31605c1SDominik Dingel 
15808ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu)
15818ad35755SDavid Hildenbrand {
15828ad35755SDavid Hildenbrand 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
15838ad35755SDavid Hildenbrand }
15848ad35755SDavid Hildenbrand 
15852c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
15862c70fe44SChristian Borntraeger {
15878ad35755SDavid Hildenbrand retry:
15888ad35755SDavid Hildenbrand 	s390_vcpu_unblock(vcpu);
15892c70fe44SChristian Borntraeger 	/*
15902c70fe44SChristian Borntraeger 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
15912c70fe44SChristian Borntraeger 	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
15922c70fe44SChristian Borntraeger 	 * This ensures that the ipte instruction for this request has
15932c70fe44SChristian Borntraeger 	 * already finished. We might race against a second unmapper that
15942c70fe44SChristian Borntraeger 	 * wants to set the blocking bit. Lets just retry the request loop.
15952c70fe44SChristian Borntraeger 	 */
15968ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
15972c70fe44SChristian Borntraeger 		int rc;
15982c70fe44SChristian Borntraeger 		rc = gmap_ipte_notify(vcpu->arch.gmap,
1599fda902cbSMichael Mueller 				      kvm_s390_get_prefix(vcpu),
16002c70fe44SChristian Borntraeger 				      PAGE_SIZE * 2);
16012c70fe44SChristian Borntraeger 		if (rc)
16022c70fe44SChristian Borntraeger 			return rc;
16038ad35755SDavid Hildenbrand 		goto retry;
16042c70fe44SChristian Borntraeger 	}
16058ad35755SDavid Hildenbrand 
1606d3d692c8SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
1607d3d692c8SDavid Hildenbrand 		vcpu->arch.sie_block->ihcpu = 0xffff;
1608d3d692c8SDavid Hildenbrand 		goto retry;
1609d3d692c8SDavid Hildenbrand 	}
1610d3d692c8SDavid Hildenbrand 
16118ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
16128ad35755SDavid Hildenbrand 		if (!ibs_enabled(vcpu)) {
16138ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
16148ad35755SDavid Hildenbrand 			atomic_set_mask(CPUSTAT_IBS,
16158ad35755SDavid Hildenbrand 					&vcpu->arch.sie_block->cpuflags);
16168ad35755SDavid Hildenbrand 		}
16178ad35755SDavid Hildenbrand 		goto retry;
16188ad35755SDavid Hildenbrand 	}
16198ad35755SDavid Hildenbrand 
16208ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
16218ad35755SDavid Hildenbrand 		if (ibs_enabled(vcpu)) {
16228ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
16238ad35755SDavid Hildenbrand 			atomic_clear_mask(CPUSTAT_IBS,
16248ad35755SDavid Hildenbrand 					  &vcpu->arch.sie_block->cpuflags);
16258ad35755SDavid Hildenbrand 		}
16268ad35755SDavid Hildenbrand 		goto retry;
16278ad35755SDavid Hildenbrand 	}
16288ad35755SDavid Hildenbrand 
16290759d068SDavid Hildenbrand 	/* nothing to do, just clear the request */
16300759d068SDavid Hildenbrand 	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
16310759d068SDavid Hildenbrand 
16322c70fe44SChristian Borntraeger 	return 0;
16332c70fe44SChristian Borntraeger }
16342c70fe44SChristian Borntraeger 
1635fa576c58SThomas Huth /**
1636fa576c58SThomas Huth  * kvm_arch_fault_in_page - fault-in guest page if necessary
1637fa576c58SThomas Huth  * @vcpu: The corresponding virtual cpu
1638fa576c58SThomas Huth  * @gpa: Guest physical address
1639fa576c58SThomas Huth  * @writable: Whether the page should be writable or not
1640fa576c58SThomas Huth  *
1641fa576c58SThomas Huth  * Make sure that a guest page has been faulted-in on the host.
1642fa576c58SThomas Huth  *
1643fa576c58SThomas Huth  * Return: Zero on success, negative error code otherwise.
1644fa576c58SThomas Huth  */
1645fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
164624eb3a82SDominik Dingel {
1647527e30b4SMartin Schwidefsky 	return gmap_fault(vcpu->arch.gmap, gpa,
1648527e30b4SMartin Schwidefsky 			  writable ? FAULT_FLAG_WRITE : 0);
164924eb3a82SDominik Dingel }
165024eb3a82SDominik Dingel 
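/*
 * Pfault handshake helper: inject either a PFAULT_INIT interrupt on the
 * vcpu (start of the handshake, carrying the token) or a PFAULT_DONE
 * floating interrupt once the page has been made available.
 */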
16513c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
16523c038e6bSDominik Dingel 				      unsigned long token)
16533c038e6bSDominik Dingel {
16543c038e6bSDominik Dingel 	struct kvm_s390_interrupt inti;
1655383d0b05SJens Freimann 	struct kvm_s390_irq irq;
16563c038e6bSDominik Dingel 
16573c038e6bSDominik Dingel 	if (start_token) {
1658383d0b05SJens Freimann 		irq.u.ext.ext_params2 = token;
1659383d0b05SJens Freimann 		irq.type = KVM_S390_INT_PFAULT_INIT;
1660383d0b05SJens Freimann 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
16613c038e6bSDominik Dingel 	} else {
16623c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_DONE;
1663383d0b05SJens Freimann 		inti.parm64 = token;
16643c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
16653c038e6bSDominik Dingel 	}
16663c038e6bSDominik Dingel }
16673c038e6bSDominik Dingel 
16683c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
16693c038e6bSDominik Dingel 				     struct kvm_async_pf *work)
16703c038e6bSDominik Dingel {
16713c038e6bSDominik Dingel 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
16723c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
16733c038e6bSDominik Dingel }
16743c038e6bSDominik Dingel 
16753c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
16763c038e6bSDominik Dingel 				 struct kvm_async_pf *work)
16773c038e6bSDominik Dingel {
16783c038e6bSDominik Dingel 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
16793c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
16803c038e6bSDominik Dingel }
16813c038e6bSDominik Dingel 
16823c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
16833c038e6bSDominik Dingel 			       struct kvm_async_pf *work)
16843c038e6bSDominik Dingel {
16853c038e6bSDominik Dingel 	/* s390 will always inject the page directly */
16863c038e6bSDominik Dingel }
16873c038e6bSDominik Dingel 
16883c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
16893c038e6bSDominik Dingel {
16903c038e6bSDominik Dingel 	/*
16913c038e6bSDominik Dingel 	 * s390 will always inject the page directly,
16923c038e6bSDominik Dingel 	 * but we still want check_async_completion to clean up
16933c038e6bSDominik Dingel 	 */
16943c038e6bSDominik Dingel 	return true;
16953c038e6bSDominik Dingel }
16963c038e6bSDominik Dingel 
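/*
 * Decide whether the current host fault may be handled asynchronously
 * via the pfault handshake: a valid token must be set, the PSW must
 * match the configured compare/select values, external interrupts and
 * the relevant CR0 subclass must be enabled, and pfault must be enabled
 * on the gmap. If so, read the token from guest memory and queue the
 * async pf work.
 */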
16973c038e6bSDominik Dingel static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
16983c038e6bSDominik Dingel {
16993c038e6bSDominik Dingel 	hva_t hva;
17003c038e6bSDominik Dingel 	struct kvm_arch_async_pf arch;
17013c038e6bSDominik Dingel 	int rc;
17023c038e6bSDominik Dingel 
17033c038e6bSDominik Dingel 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
17043c038e6bSDominik Dingel 		return 0;
17053c038e6bSDominik Dingel 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
17063c038e6bSDominik Dingel 	    vcpu->arch.pfault_compare)
17073c038e6bSDominik Dingel 		return 0;
17083c038e6bSDominik Dingel 	if (psw_extint_disabled(vcpu))
17093c038e6bSDominik Dingel 		return 0;
17109a022067SDavid Hildenbrand 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
17113c038e6bSDominik Dingel 		return 0;
17123c038e6bSDominik Dingel 	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
17133c038e6bSDominik Dingel 		return 0;
17143c038e6bSDominik Dingel 	if (!vcpu->arch.gmap->pfault_enabled)
17153c038e6bSDominik Dingel 		return 0;
17163c038e6bSDominik Dingel 
171781480cc1SHeiko Carstens 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
171881480cc1SHeiko Carstens 	hva += current->thread.gmap_addr & ~PAGE_MASK;
171981480cc1SHeiko Carstens 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
17203c038e6bSDominik Dingel 		return 0;
17213c038e6bSDominik Dingel 
17223c038e6bSDominik Dingel 	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
17233c038e6bSDominik Dingel 	return rc;
17243c038e6bSDominik Dingel }
17253c038e6bSDominik Dingel 
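/*
 * Per-entry housekeeping before (re)entering SIE: handle completed
 * async pfaults, pending machine checks, deliver pending interrupts,
 * process vcpu requests and patch in guest-debug PER settings.
 */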
17263fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu)
1727b0c632dbSHeiko Carstens {
17283fb4c40fSThomas Huth 	int rc, cpuflags;
1729e168bf8dSCarsten Otte 
17303c038e6bSDominik Dingel 	/*
17313c038e6bSDominik Dingel 	 * On s390 notifications for arriving pages will be delivered directly
17323c038e6bSDominik Dingel 	 * to the guest but the housekeeping for completed pfaults is
17333c038e6bSDominik Dingel 	 * handled outside the worker.
17343c038e6bSDominik Dingel 	 */
17353c038e6bSDominik Dingel 	kvm_check_async_pf_completion(vcpu);
17363c038e6bSDominik Dingel 
17375a32c1afSChristian Borntraeger 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
1738b0c632dbSHeiko Carstens 
1739b0c632dbSHeiko Carstens 	if (need_resched())
1740b0c632dbSHeiko Carstens 		schedule();
1741b0c632dbSHeiko Carstens 
1742d3a73acbSMartin Schwidefsky 	if (test_cpu_flag(CIF_MCCK_PENDING))
174371cde587SChristian Borntraeger 		s390_handle_mcck();
174471cde587SChristian Borntraeger 
174579395031SJens Freimann 	if (!kvm_is_ucontrol(vcpu->kvm)) {
174679395031SJens Freimann 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
174779395031SJens Freimann 		if (rc)
174879395031SJens Freimann 			return rc;
174979395031SJens Freimann 	}
17500ff31867SCarsten Otte 
17512c70fe44SChristian Borntraeger 	rc = kvm_s390_handle_requests(vcpu);
17522c70fe44SChristian Borntraeger 	if (rc)
17532c70fe44SChristian Borntraeger 		return rc;
17542c70fe44SChristian Borntraeger 
175527291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu)) {
175627291e21SDavid Hildenbrand 		kvm_s390_backup_guest_per_regs(vcpu);
175727291e21SDavid Hildenbrand 		kvm_s390_patch_guest_per_regs(vcpu);
175827291e21SDavid Hildenbrand 	}
175927291e21SDavid Hildenbrand 
1760b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icptcode = 0;
17613fb4c40fSThomas Huth 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
17623fb4c40fSThomas Huth 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
17633fb4c40fSThomas Huth 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
17642b29a9fdSDominik Dingel 
17653fb4c40fSThomas Huth 	return 0;
17663fb4c40fSThomas Huth }
17673fb4c40fSThomas Huth 
1768492d8642SThomas Huth static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
1769492d8642SThomas Huth {
1770492d8642SThomas Huth 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
1771492d8642SThomas Huth 	u8 opcode;
1772492d8642SThomas Huth 	int rc;
1773492d8642SThomas Huth 
1774492d8642SThomas Huth 	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
1775492d8642SThomas Huth 	trace_kvm_s390_sie_fault(vcpu);
1776492d8642SThomas Huth 
1777492d8642SThomas Huth 	/*
1778492d8642SThomas Huth 	 * We want to inject an addressing exception, which is defined as a
1779492d8642SThomas Huth 	 * suppressing or terminating exception. However, since we came here
1780492d8642SThomas Huth 	 * by a DAT access exception, the PSW still points to the faulting
1781492d8642SThomas Huth 	 * instruction since DAT exceptions are nullifying. So we've got
1782492d8642SThomas Huth 	 * to look up the current opcode to get the length of the instruction
1783492d8642SThomas Huth 	 * to be able to forward the PSW.
1784492d8642SThomas Huth 	 */
17858ae04b8fSAlexander Yarygin 	rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
1786492d8642SThomas Huth 	if (rc)
1787492d8642SThomas Huth 		return kvm_s390_inject_prog_cond(vcpu, rc);
1788492d8642SThomas Huth 	psw->addr = __rewind_psw(*psw, -insn_length(opcode));
1789492d8642SThomas Huth 
1790492d8642SThomas Huth 	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
1791492d8642SThomas Huth }
1792492d8642SThomas Huth 
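/*
 * Evaluate the result of a SIE exit: forward intercepts to the in-kernel
 * handlers, turn ucontrol faults into KVM_EXIT_S390_UCONTROL, resolve
 * gmap faults (synchronously or via the async pfault machinery) and fall
 * back to injecting an addressing exception for faults taken inside SIE.
 */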
17933fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
17943fb4c40fSThomas Huth {
179524eb3a82SDominik Dingel 	int rc = -1;
17962b29a9fdSDominik Dingel 
17972b29a9fdSDominik Dingel 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
17982b29a9fdSDominik Dingel 		   vcpu->arch.sie_block->icptcode);
17992b29a9fdSDominik Dingel 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
18002b29a9fdSDominik Dingel 
180127291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu))
180227291e21SDavid Hildenbrand 		kvm_s390_restore_guest_per_regs(vcpu);
180327291e21SDavid Hildenbrand 
18043fb4c40fSThomas Huth 	if (exit_reason >= 0) {
18057c470539SMartin Schwidefsky 		rc = 0;
1806210b1607SThomas Huth 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
1807210b1607SThomas Huth 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
1808210b1607SThomas Huth 		vcpu->run->s390_ucontrol.trans_exc_code =
1809210b1607SThomas Huth 						current->thread.gmap_addr;
1810210b1607SThomas Huth 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
1811210b1607SThomas Huth 		rc = -EREMOTE;
181224eb3a82SDominik Dingel 
181324eb3a82SDominik Dingel 	} else if (current->thread.gmap_pfault) {
18143c038e6bSDominik Dingel 		trace_kvm_s390_major_guest_pfault(vcpu);
181524eb3a82SDominik Dingel 		current->thread.gmap_pfault = 0;
1816fa576c58SThomas Huth 		if (kvm_arch_setup_async_pf(vcpu)) {
181724eb3a82SDominik Dingel 			rc = 0;
1818fa576c58SThomas Huth 		} else {
1819fa576c58SThomas Huth 			gpa_t gpa = current->thread.gmap_addr;
1820fa576c58SThomas Huth 			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
1821fa576c58SThomas Huth 		}
182224eb3a82SDominik Dingel 	}
182324eb3a82SDominik Dingel 
1824492d8642SThomas Huth 	if (rc == -1)
1825492d8642SThomas Huth 		rc = vcpu_post_run_fault_in_sie(vcpu);
1826b0c632dbSHeiko Carstens 
18275a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
18283fb4c40fSThomas Huth 
1829a76ccff6SThomas Huth 	if (rc == 0) {
1830a76ccff6SThomas Huth 		if (kvm_is_ucontrol(vcpu->kvm))
18312955c83fSChristian Borntraeger 			/* Don't exit for host interrupts. */
18322955c83fSChristian Borntraeger 			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
1833a76ccff6SThomas Huth 		else
1834a76ccff6SThomas Huth 			rc = kvm_handle_sie_intercept(vcpu);
1835a76ccff6SThomas Huth 	}
1836a76ccff6SThomas Huth 
18373fb4c40fSThomas Huth 	return rc;
18383fb4c40fSThomas Huth }
18393fb4c40fSThomas Huth 
18403fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu)
18413fb4c40fSThomas Huth {
18423fb4c40fSThomas Huth 	int rc, exit_reason;
18433fb4c40fSThomas Huth 
1844800c1065SThomas Huth 	/*
1845800c1065SThomas Huth 	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
1846800c1065SThomas Huth 	 * ning the guest), so that memslots (and other stuff) are protected
1847800c1065SThomas Huth 	 */
1848800c1065SThomas Huth 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1849800c1065SThomas Huth 
1850a76ccff6SThomas Huth 	do {
18513fb4c40fSThomas Huth 		rc = vcpu_pre_run(vcpu);
18523fb4c40fSThomas Huth 		if (rc)
1853a76ccff6SThomas Huth 			break;
18543fb4c40fSThomas Huth 
1855800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
18563fb4c40fSThomas Huth 		/*
1857a76ccff6SThomas Huth 		 * As PF_VCPU will be used in the fault handler, there must be
1858a76ccff6SThomas Huth 		 * no uaccess between guest_enter and guest_exit.
18593fb4c40fSThomas Huth 		 */
18603fb4c40fSThomas Huth 		preempt_disable();
18613fb4c40fSThomas Huth 		kvm_guest_enter();
18623fb4c40fSThomas Huth 		preempt_enable();
1863a76ccff6SThomas Huth 		exit_reason = sie64a(vcpu->arch.sie_block,
1864a76ccff6SThomas Huth 				     vcpu->run->s.regs.gprs);
18653fb4c40fSThomas Huth 		kvm_guest_exit();
1866800c1065SThomas Huth 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
18673fb4c40fSThomas Huth 
18683fb4c40fSThomas Huth 		rc = vcpu_post_run(vcpu, exit_reason);
186927291e21SDavid Hildenbrand 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
18703fb4c40fSThomas Huth 
1871800c1065SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1872e168bf8dSCarsten Otte 	return rc;
1873b0c632dbSHeiko Carstens }
1874b0c632dbSHeiko Carstens 
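/*
 * sync_regs copies the register state that userspace marked dirty in
 * kvm_run into the SIE block and vcpu before entering the guest;
 * store_regs below copies the current state back for userspace once
 * the run loop has finished.
 */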
1875b028ee3eSDavid Hildenbrand static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1876b028ee3eSDavid Hildenbrand {
1877b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
1878b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
1879b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
1880b028ee3eSDavid Hildenbrand 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
1881b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
1882b028ee3eSDavid Hildenbrand 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
1883d3d692c8SDavid Hildenbrand 		/* some control register changes require a tlb flush */
1884d3d692c8SDavid Hildenbrand 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1885b028ee3eSDavid Hildenbrand 	}
1886b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
1887b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
1888b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
1889b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
1890b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
1891b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
1892b028ee3eSDavid Hildenbrand 	}
1893b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
1894b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
1895b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
1896b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
18979fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
18989fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
1899b028ee3eSDavid Hildenbrand 	}
1900b028ee3eSDavid Hildenbrand 	kvm_run->kvm_dirty_regs = 0;
1901b028ee3eSDavid Hildenbrand }
1902b028ee3eSDavid Hildenbrand 
1903b028ee3eSDavid Hildenbrand static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1904b028ee3eSDavid Hildenbrand {
1905b028ee3eSDavid Hildenbrand 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
1906b028ee3eSDavid Hildenbrand 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
1907b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
1908b028ee3eSDavid Hildenbrand 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
1909b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
1910b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
1911b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
1912b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
1913b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
1914b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
1915b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
1916b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
1917b028ee3eSDavid Hildenbrand }
1918b028ee3eSDavid Hildenbrand 
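/*
 * Main KVM_RUN ioctl: honour pending guest-debug exits, sync registers
 * from userspace, run the SIE loop and afterwards translate intercepts
 * that need userspace involvement into the corresponding exit reasons
 * before storing the registers back into kvm_run.
 */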
1919b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1920b0c632dbSHeiko Carstens {
19218f2abe6aSChristian Borntraeger 	int rc;
1922b0c632dbSHeiko Carstens 	sigset_t sigsaved;
1923b0c632dbSHeiko Carstens 
192427291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu)) {
192527291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
192627291e21SDavid Hildenbrand 		return 0;
192727291e21SDavid Hildenbrand 	}
192827291e21SDavid Hildenbrand 
1929b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
1930b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1931b0c632dbSHeiko Carstens 
19326352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
19336852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
19346352e4d2SDavid Hildenbrand 	} else if (is_vcpu_stopped(vcpu)) {
19356352e4d2SDavid Hildenbrand 		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
19366352e4d2SDavid Hildenbrand 				   vcpu->vcpu_id);
19376352e4d2SDavid Hildenbrand 		return -EINVAL;
19386352e4d2SDavid Hildenbrand 	}
1939b0c632dbSHeiko Carstens 
1940b028ee3eSDavid Hildenbrand 	sync_regs(vcpu, kvm_run);
1941d7b0b5ebSCarsten Otte 
1942dab4079dSHeiko Carstens 	might_fault();
1943e168bf8dSCarsten Otte 	rc = __vcpu_run(vcpu);
19449ace903dSChristian Ehrhardt 
1945b1d16c49SChristian Ehrhardt 	if (signal_pending(current) && !rc) {
1946b1d16c49SChristian Ehrhardt 		kvm_run->exit_reason = KVM_EXIT_INTR;
19478f2abe6aSChristian Borntraeger 		rc = -EINTR;
1948b1d16c49SChristian Ehrhardt 	}
19498f2abe6aSChristian Borntraeger 
195027291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu) && !rc)  {
195127291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
195227291e21SDavid Hildenbrand 		rc = 0;
195327291e21SDavid Hildenbrand 	}
195427291e21SDavid Hildenbrand 
1955b8e660b8SHeiko Carstens 	if (rc == -EOPNOTSUPP) {
19568f2abe6aSChristian Borntraeger 		/* intercept cannot be handled in-kernel, prepare kvm-run */
19578f2abe6aSChristian Borntraeger 		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
19588f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
19598f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
19608f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
19618f2abe6aSChristian Borntraeger 		rc = 0;
19628f2abe6aSChristian Borntraeger 	}
19638f2abe6aSChristian Borntraeger 
19648f2abe6aSChristian Borntraeger 	if (rc == -EREMOTE) {
19658f2abe6aSChristian Borntraeger 		/* intercept was handled, but userspace support is needed;
19668f2abe6aSChristian Borntraeger 		 * kvm_run has been prepared by the handler */
19678f2abe6aSChristian Borntraeger 		rc = 0;
19688f2abe6aSChristian Borntraeger 	}
19698f2abe6aSChristian Borntraeger 
1970b028ee3eSDavid Hildenbrand 	store_regs(vcpu, kvm_run);
1971d7b0b5ebSCarsten Otte 
1972b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
1973b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1974b0c632dbSHeiko Carstens 
1975b0c632dbSHeiko Carstens 	vcpu->stat.exit_userspace++;
19767e8e6ab4SHeiko Carstens 	return rc;
1977b0c632dbSHeiko Carstens }
1978b0c632dbSHeiko Carstens 
1979b0c632dbSHeiko Carstens /*
1980b0c632dbSHeiko Carstens  * store status at address
1981b0c632dbSHeiko Carstens  * we have two special cases:
1982b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
1983b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
1984b0c632dbSHeiko Carstens  */
1985d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
1986b0c632dbSHeiko Carstens {
1987092670cdSCarsten Otte 	unsigned char archmode = 1;
1988fda902cbSMichael Mueller 	unsigned int px;
1989178bd789SThomas Huth 	u64 clkcomp;
1990d0bce605SHeiko Carstens 	int rc;
1991b0c632dbSHeiko Carstens 
1992d0bce605SHeiko Carstens 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
1993d0bce605SHeiko Carstens 		if (write_guest_abs(vcpu, 163, &archmode, 1))
1994b0c632dbSHeiko Carstens 			return -EFAULT;
1995d0bce605SHeiko Carstens 		gpa = SAVE_AREA_BASE;
1996d0bce605SHeiko Carstens 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
1997d0bce605SHeiko Carstens 		if (write_guest_real(vcpu, 163, &archmode, 1))
1998b0c632dbSHeiko Carstens 			return -EFAULT;
1999d0bce605SHeiko Carstens 		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
2000d0bce605SHeiko Carstens 	}
2001d0bce605SHeiko Carstens 	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
2002d0bce605SHeiko Carstens 			     vcpu->arch.guest_fpregs.fprs, 128);
2003d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
2004d0bce605SHeiko Carstens 			      vcpu->run->s.regs.gprs, 128);
2005d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
2006d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gpsw, 16);
2007fda902cbSMichael Mueller 	px = kvm_s390_get_prefix(vcpu);
2008d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
2009fda902cbSMichael Mueller 			      &px, 4);
2010d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu,
2011d0bce605SHeiko Carstens 			      gpa + offsetof(struct save_area, fp_ctrl_reg),
2012d0bce605SHeiko Carstens 			      &vcpu->arch.guest_fpregs.fpc, 4);
2013d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
2014d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->todpr, 4);
2015d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
2016d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->cputm, 8);
2017178bd789SThomas Huth 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
2018d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
2019d0bce605SHeiko Carstens 			      &clkcomp, 8);
2020d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
2021d0bce605SHeiko Carstens 			      &vcpu->run->s.regs.acrs, 64);
2022d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
2023d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gcr, 128);
2024d0bce605SHeiko Carstens 	return rc ? -EFAULT : 0;
2025b0c632dbSHeiko Carstens }
2026b0c632dbSHeiko Carstens 
2027e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2028e879892cSThomas Huth {
2029e879892cSThomas Huth 	/*
2030e879892cSThomas Huth 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2031e879892cSThomas Huth 	 * copying in vcpu load/put. Let's update our copies before we save
2032e879892cSThomas Huth 	 * them into the save area.
2033e879892cSThomas Huth 	 */
2034e879892cSThomas Huth 	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
2035e879892cSThomas Huth 	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
2036e879892cSThomas Huth 	save_access_regs(vcpu->run->s.regs.acrs);
2037e879892cSThomas Huth 
2038e879892cSThomas Huth 	return kvm_s390_store_status_unloaded(vcpu, addr);
2039e879892cSThomas Huth }
2040e879892cSThomas Huth 
2041bc17de7cSEric Farman /*
2042bc17de7cSEric Farman  * store additional status at address
2043bc17de7cSEric Farman  */
2044bc17de7cSEric Farman int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2045bc17de7cSEric Farman 					unsigned long gpa)
2046bc17de7cSEric Farman {
2047bc17de7cSEric Farman 	/* Only bits 0-53 are used for address formation */
2048bc17de7cSEric Farman 	if (!(gpa & ~0x3ff))
2049bc17de7cSEric Farman 		return 0;
2050bc17de7cSEric Farman 
2051bc17de7cSEric Farman 	return write_guest_abs(vcpu, gpa & ~0x3ff,
2052bc17de7cSEric Farman 			       (void *)&vcpu->run->s.regs.vrs, 512);
2053bc17de7cSEric Farman }
2054bc17de7cSEric Farman 
2055bc17de7cSEric Farman int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2056bc17de7cSEric Farman {
2057bc17de7cSEric Farman 	if (!test_kvm_facility(vcpu->kvm, 129))
2058bc17de7cSEric Farman 		return 0;
2059bc17de7cSEric Farman 
2060bc17de7cSEric Farman 	/*
2061bc17de7cSEric Farman 	 * The guest VXRS are in the host VXRS due to the lazy
2062bc17de7cSEric Farman 	 * copying in vcpu load/put. Let's update our copies before we save
2063bc17de7cSEric Farman 	 * them into the save area.
2064bc17de7cSEric Farman 	 */
2065bc17de7cSEric Farman 	save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
2066bc17de7cSEric Farman 
2067bc17de7cSEric Farman 	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2068bc17de7cSEric Farman }
2069bc17de7cSEric Farman 
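/*
 * IBS handling: the helpers below merely queue an ENABLE/DISABLE IBS
 * request (cancelling an opposite pending one) and kick the vcpu out of
 * SIE; the request itself is processed in kvm_s390_handle_requests.
 */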
20708ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
20718ad35755SDavid Hildenbrand {
20728ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
20738ad35755SDavid Hildenbrand 	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
20748ad35755SDavid Hildenbrand 	exit_sie_sync(vcpu);
20758ad35755SDavid Hildenbrand }
20768ad35755SDavid Hildenbrand 
20778ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
20788ad35755SDavid Hildenbrand {
20798ad35755SDavid Hildenbrand 	unsigned int i;
20808ad35755SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
20818ad35755SDavid Hildenbrand 
20828ad35755SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
20838ad35755SDavid Hildenbrand 		__disable_ibs_on_vcpu(vcpu);
20848ad35755SDavid Hildenbrand 	}
20858ad35755SDavid Hildenbrand }
20868ad35755SDavid Hildenbrand 
20878ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
20888ad35755SDavid Hildenbrand {
20898ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
20908ad35755SDavid Hildenbrand 	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
20918ad35755SDavid Hildenbrand 	exit_sie_sync(vcpu);
20928ad35755SDavid Hildenbrand }
20938ad35755SDavid Hildenbrand 
20946852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
20956852d7b6SDavid Hildenbrand {
20968ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
20978ad35755SDavid Hildenbrand 
20988ad35755SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
20998ad35755SDavid Hildenbrand 		return;
21008ad35755SDavid Hildenbrand 
21016852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
21028ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
2103433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
21048ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
21058ad35755SDavid Hildenbrand 
21068ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
21078ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
21088ad35755SDavid Hildenbrand 			started_vcpus++;
21098ad35755SDavid Hildenbrand 	}
21108ad35755SDavid Hildenbrand 
21118ad35755SDavid Hildenbrand 	if (started_vcpus == 0) {
21128ad35755SDavid Hildenbrand 		/* we're the only active VCPU -> speed it up */
21138ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(vcpu);
21148ad35755SDavid Hildenbrand 	} else if (started_vcpus == 1) {
21158ad35755SDavid Hildenbrand 		/*
21168ad35755SDavid Hildenbrand 		 * As we are starting a second VCPU, we have to disable
21178ad35755SDavid Hildenbrand 		 * the IBS facility on all VCPUs to remove potentially
21188ad35755SDavid Hildenbrand 		 * outstanding ENABLE requests.
21198ad35755SDavid Hildenbrand 		 */
21208ad35755SDavid Hildenbrand 		__disable_ibs_on_all_vcpus(vcpu->kvm);
21218ad35755SDavid Hildenbrand 	}
21228ad35755SDavid Hildenbrand 
21236852d7b6SDavid Hildenbrand 	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
21248ad35755SDavid Hildenbrand 	/*
21258ad35755SDavid Hildenbrand 	 * Another VCPU might have used IBS while we were offline.
21268ad35755SDavid Hildenbrand 	 * Let's play safe and flush the VCPU at startup.
21278ad35755SDavid Hildenbrand 	 */
2128d3d692c8SDavid Hildenbrand 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2129433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
21308ad35755SDavid Hildenbrand 	return;
21316852d7b6SDavid Hildenbrand }
21326852d7b6SDavid Hildenbrand 
21336852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
21346852d7b6SDavid Hildenbrand {
21358ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
21368ad35755SDavid Hildenbrand 	struct kvm_vcpu *started_vcpu = NULL;
21378ad35755SDavid Hildenbrand 
21388ad35755SDavid Hildenbrand 	if (is_vcpu_stopped(vcpu))
21398ad35755SDavid Hildenbrand 		return;
21408ad35755SDavid Hildenbrand 
21416852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
21428ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
2143433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
21448ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
21458ad35755SDavid Hildenbrand 
214632f5ff63SDavid Hildenbrand 	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
21476cddd432SDavid Hildenbrand 	kvm_s390_clear_stop_irq(vcpu);
214832f5ff63SDavid Hildenbrand 
21496cddd432SDavid Hildenbrand 	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
21508ad35755SDavid Hildenbrand 	__disable_ibs_on_vcpu(vcpu);
21518ad35755SDavid Hildenbrand 
21528ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
21538ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
21548ad35755SDavid Hildenbrand 			started_vcpus++;
21558ad35755SDavid Hildenbrand 			started_vcpu = vcpu->kvm->vcpus[i];
21568ad35755SDavid Hildenbrand 		}
21578ad35755SDavid Hildenbrand 	}
21588ad35755SDavid Hildenbrand 
21598ad35755SDavid Hildenbrand 	if (started_vcpus == 1) {
21608ad35755SDavid Hildenbrand 		/*
21618ad35755SDavid Hildenbrand 		 * As we only have one VCPU left, we want to enable the
21628ad35755SDavid Hildenbrand 		 * IBS facility for that VCPU to speed it up.
21638ad35755SDavid Hildenbrand 		 */
21648ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(started_vcpu);
21658ad35755SDavid Hildenbrand 	}
21668ad35755SDavid Hildenbrand 
2167433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
21688ad35755SDavid Hildenbrand 	return;
21696852d7b6SDavid Hildenbrand }
21706852d7b6SDavid Hildenbrand 
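/*
 * Editorial sketch, not part of the original file: both the start and stop
 * handlers above count how many VCPUs are currently started, so that IBS is
 * only enabled while exactly one VCPU is running.  A hypothetical helper
 * equivalent to the open-coded loops:
 */
static inline int __count_started_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned int i;
	int started = 0;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!is_vcpu_stopped(vcpu))
			started++;
	}
	return started;
}
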
2171d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2172d6712df9SCornelia Huck 				     struct kvm_enable_cap *cap)
2173d6712df9SCornelia Huck {
2174d6712df9SCornelia Huck 	int r;
2175d6712df9SCornelia Huck 
2176d6712df9SCornelia Huck 	if (cap->flags)
2177d6712df9SCornelia Huck 		return -EINVAL;
2178d6712df9SCornelia Huck 
2179d6712df9SCornelia Huck 	switch (cap->cap) {
2180fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
2181fa6b7fe9SCornelia Huck 		if (!vcpu->kvm->arch.css_support) {
2182fa6b7fe9SCornelia Huck 			vcpu->kvm->arch.css_support = 1;
2183fa6b7fe9SCornelia Huck 			trace_kvm_s390_enable_css(vcpu->kvm);
2184fa6b7fe9SCornelia Huck 		}
2185fa6b7fe9SCornelia Huck 		r = 0;
2186fa6b7fe9SCornelia Huck 		break;
2187d6712df9SCornelia Huck 	default:
2188d6712df9SCornelia Huck 		r = -EINVAL;
2189d6712df9SCornelia Huck 		break;
2190d6712df9SCornelia Huck 	}
2191d6712df9SCornelia Huck 	return r;
2192d6712df9SCornelia Huck }
2193d6712df9SCornelia Huck 
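/*
 * Editorial sketch, not part of the original file: from userspace, the
 * capability handled above is enabled through the generic KVM_ENABLE_CAP
 * vcpu ioctl, roughly like this (error handling shortened; vcpu_fd is
 * assumed to be an open KVM vcpu file descriptor):
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_S390_CSS_SUPPORT,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		perror("KVM_ENABLE_CAP");
 */
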
2194*41408c28SThomas Huth static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
2195*41408c28SThomas Huth 				  struct kvm_s390_mem_op *mop)
2196*41408c28SThomas Huth {
2197*41408c28SThomas Huth 	void __user *uaddr = (void __user *)mop->buf;
2198*41408c28SThomas Huth 	void *tmpbuf = NULL;
2199*41408c28SThomas Huth 	int r, srcu_idx;
2200*41408c28SThomas Huth 	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
2201*41408c28SThomas Huth 				    | KVM_S390_MEMOP_F_CHECK_ONLY;
2202*41408c28SThomas Huth 
2203*41408c28SThomas Huth 	if (mop->flags & ~supported_flags)
2204*41408c28SThomas Huth 		return -EINVAL;
2205*41408c28SThomas Huth 
2206*41408c28SThomas Huth 	if (mop->size > MEM_OP_MAX_SIZE)
2207*41408c28SThomas Huth 		return -E2BIG;
2208*41408c28SThomas Huth 
2209*41408c28SThomas Huth 	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2210*41408c28SThomas Huth 		tmpbuf = vmalloc(mop->size);
2211*41408c28SThomas Huth 		if (!tmpbuf)
2212*41408c28SThomas Huth 			return -ENOMEM;
2213*41408c28SThomas Huth 	}
2214*41408c28SThomas Huth 
2215*41408c28SThomas Huth 	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2216*41408c28SThomas Huth 
2217*41408c28SThomas Huth 	switch (mop->op) {
2218*41408c28SThomas Huth 	case KVM_S390_MEMOP_LOGICAL_READ:
2219*41408c28SThomas Huth 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2220*41408c28SThomas Huth 			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
2221*41408c28SThomas Huth 			break;
2222*41408c28SThomas Huth 		}
2223*41408c28SThomas Huth 		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2224*41408c28SThomas Huth 		if (r == 0) {
2225*41408c28SThomas Huth 			if (copy_to_user(uaddr, tmpbuf, mop->size))
2226*41408c28SThomas Huth 				r = -EFAULT;
2227*41408c28SThomas Huth 		}
2228*41408c28SThomas Huth 		break;
2229*41408c28SThomas Huth 	case KVM_S390_MEMOP_LOGICAL_WRITE:
2230*41408c28SThomas Huth 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2231*41408c28SThomas Huth 			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
2232*41408c28SThomas Huth 			break;
2233*41408c28SThomas Huth 		}
2234*41408c28SThomas Huth 		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2235*41408c28SThomas Huth 			r = -EFAULT;
2236*41408c28SThomas Huth 			break;
2237*41408c28SThomas Huth 		}
2238*41408c28SThomas Huth 		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2239*41408c28SThomas Huth 		break;
2240*41408c28SThomas Huth 	default:
2241*41408c28SThomas Huth 		r = -EINVAL;
2242*41408c28SThomas Huth 	}
2243*41408c28SThomas Huth 
2244*41408c28SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2245*41408c28SThomas Huth 
2246*41408c28SThomas Huth 	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
2247*41408c28SThomas Huth 		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
2248*41408c28SThomas Huth 
2249*41408c28SThomas Huth 	vfree(tmpbuf);
2250*41408c28SThomas Huth 	return r;
2251*41408c28SThomas Huth }
2252*41408c28SThomas Huth 
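/*
 * Editorial sketch, not part of the original file: userspace drives the
 * handler above through the KVM_S390_MEM_OP vcpu ioctl.  Reading 256 bytes
 * from guest logical address 0x1000 could look roughly like this (error
 * handling shortened; vcpu_fd is assumed to be an open KVM vcpu file
 * descriptor):
 *
 *	unsigned char buf[256];
 *	struct kvm_s390_mem_op mop = {
 *		.gaddr = 0x1000,
 *		.size  = sizeof(buf),
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (unsigned long)buf,
 *		.ar    = 0,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &mop) < 0)
 *		perror("KVM_S390_MEM_OP");
 */
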
2253b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp,
2254b0c632dbSHeiko Carstens 			 unsigned int ioctl, unsigned long arg)
2255b0c632dbSHeiko Carstens {
2256b0c632dbSHeiko Carstens 	struct kvm_vcpu *vcpu = filp->private_data;
2257b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
2258800c1065SThomas Huth 	int idx;
2259bc923cc9SAvi Kivity 	long r;
2260b0c632dbSHeiko Carstens 
226193736624SAvi Kivity 	switch (ioctl) {
226293736624SAvi Kivity 	case KVM_S390_INTERRUPT: {
2263ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
2264383d0b05SJens Freimann 		struct kvm_s390_irq s390irq;
2265ba5c1e9bSCarsten Otte 
226693736624SAvi Kivity 		r = -EFAULT;
2267ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
226893736624SAvi Kivity 			break;
2269383d0b05SJens Freimann 		if (s390int_to_s390irq(&s390int, &s390irq))
2270383d0b05SJens Freimann 			return -EINVAL;
2271383d0b05SJens Freimann 		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
227293736624SAvi Kivity 		break;
2273ba5c1e9bSCarsten Otte 	}
2274b0c632dbSHeiko Carstens 	case KVM_S390_STORE_STATUS:
2275800c1065SThomas Huth 		idx = srcu_read_lock(&vcpu->kvm->srcu);
2276bc923cc9SAvi Kivity 		r = kvm_s390_vcpu_store_status(vcpu, arg);
2277800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
2278bc923cc9SAvi Kivity 		break;
2279b0c632dbSHeiko Carstens 	case KVM_S390_SET_INITIAL_PSW: {
2280b0c632dbSHeiko Carstens 		psw_t psw;
2281b0c632dbSHeiko Carstens 
2282bc923cc9SAvi Kivity 		r = -EFAULT;
2283b0c632dbSHeiko Carstens 		if (copy_from_user(&psw, argp, sizeof(psw)))
2284bc923cc9SAvi Kivity 			break;
2285bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2286bc923cc9SAvi Kivity 		break;
2287b0c632dbSHeiko Carstens 	}
2288b0c632dbSHeiko Carstens 	case KVM_S390_INITIAL_RESET:
2289bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2290bc923cc9SAvi Kivity 		break;
229114eebd91SCarsten Otte 	case KVM_SET_ONE_REG:
229214eebd91SCarsten Otte 	case KVM_GET_ONE_REG: {
229314eebd91SCarsten Otte 		struct kvm_one_reg reg;
229414eebd91SCarsten Otte 		r = -EFAULT;
229514eebd91SCarsten Otte 		if (copy_from_user(&reg, argp, sizeof(reg)))
229614eebd91SCarsten Otte 			break;
229714eebd91SCarsten Otte 		if (ioctl == KVM_SET_ONE_REG)
229814eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
229914eebd91SCarsten Otte 		else
230014eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
230114eebd91SCarsten Otte 		break;
230214eebd91SCarsten Otte 	}
230327e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
230427e0393fSCarsten Otte 	case KVM_S390_UCAS_MAP: {
230527e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
230627e0393fSCarsten Otte 
230727e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
230827e0393fSCarsten Otte 			r = -EFAULT;
230927e0393fSCarsten Otte 			break;
231027e0393fSCarsten Otte 		}
231127e0393fSCarsten Otte 
231227e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
231327e0393fSCarsten Otte 			r = -EINVAL;
231427e0393fSCarsten Otte 			break;
231527e0393fSCarsten Otte 		}
231627e0393fSCarsten Otte 
231727e0393fSCarsten Otte 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
231827e0393fSCarsten Otte 				     ucasmap.vcpu_addr, ucasmap.length);
231927e0393fSCarsten Otte 		break;
232027e0393fSCarsten Otte 	}
232127e0393fSCarsten Otte 	case KVM_S390_UCAS_UNMAP: {
232227e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
232327e0393fSCarsten Otte 
232427e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
232527e0393fSCarsten Otte 			r = -EFAULT;
232627e0393fSCarsten Otte 			break;
232727e0393fSCarsten Otte 		}
232827e0393fSCarsten Otte 
232927e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
233027e0393fSCarsten Otte 			r = -EINVAL;
233127e0393fSCarsten Otte 			break;
233227e0393fSCarsten Otte 		}
233327e0393fSCarsten Otte 
233427e0393fSCarsten Otte 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
233527e0393fSCarsten Otte 			ucasmap.length);
233627e0393fSCarsten Otte 		break;
233727e0393fSCarsten Otte 	}
233827e0393fSCarsten Otte #endif
2339ccc7910fSCarsten Otte 	case KVM_S390_VCPU_FAULT: {
2340527e30b4SMartin Schwidefsky 		r = gmap_fault(vcpu->arch.gmap, arg, 0);
2341ccc7910fSCarsten Otte 		break;
2342ccc7910fSCarsten Otte 	}
2343d6712df9SCornelia Huck 	case KVM_ENABLE_CAP:
2344d6712df9SCornelia Huck 	{
2345d6712df9SCornelia Huck 		struct kvm_enable_cap cap;
2346d6712df9SCornelia Huck 		r = -EFAULT;
2347d6712df9SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
2348d6712df9SCornelia Huck 			break;
2349d6712df9SCornelia Huck 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2350d6712df9SCornelia Huck 		break;
2351d6712df9SCornelia Huck 	}
2352*41408c28SThomas Huth 	case KVM_S390_MEM_OP: {
2353*41408c28SThomas Huth 		struct kvm_s390_mem_op mem_op;
2354*41408c28SThomas Huth 
2355*41408c28SThomas Huth 		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
2356*41408c28SThomas Huth 			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
2357*41408c28SThomas Huth 		else
2358*41408c28SThomas Huth 			r = -EFAULT;
2359*41408c28SThomas Huth 		break;
2360*41408c28SThomas Huth 	}
2361b0c632dbSHeiko Carstens 	default:
23623e6afcf1SCarsten Otte 		r = -ENOTTY;
2363b0c632dbSHeiko Carstens 	}
2364bc923cc9SAvi Kivity 	return r;
2365b0c632dbSHeiko Carstens }
2366b0c632dbSHeiko Carstens 
23675b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
23685b1c1493SCarsten Otte {
23695b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
23705b1c1493SCarsten Otte 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
23715b1c1493SCarsten Otte 		 && (kvm_is_ucontrol(vcpu->kvm))) {
23725b1c1493SCarsten Otte 		vmf->page = virt_to_page(vcpu->arch.sie_block);
23735b1c1493SCarsten Otte 		get_page(vmf->page);
23745b1c1493SCarsten Otte 		return 0;
23755b1c1493SCarsten Otte 	}
23765b1c1493SCarsten Otte #endif
23775b1c1493SCarsten Otte 	return VM_FAULT_SIGBUS;
23785b1c1493SCarsten Otte }
23795b1c1493SCarsten Otte 
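/*
 * Editorial sketch, not part of the original file: for user-controlled
 * (ucontrol) VMs, userspace reaches the fault handler above by mmap()ing
 * the vcpu file descriptor at KVM_S390_SIE_PAGE_OFFSET, e.g. (page_size is
 * assumed to come from sysconf(_SC_PAGESIZE)):
 *
 *	void *sie_block = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, vcpu_fd,
 *			       KVM_S390_SIE_PAGE_OFFSET * page_size);
 *	if (sie_block == MAP_FAILED)
 *		perror("mmap SIE block");
 */
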
23805587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
23815587027cSAneesh Kumar K.V 			    unsigned long npages)
2382db3fe4ebSTakuya Yoshikawa {
2383db3fe4ebSTakuya Yoshikawa 	return 0;
2384db3fe4ebSTakuya Yoshikawa }
2385db3fe4ebSTakuya Yoshikawa 
2386b0c632dbSHeiko Carstens /* Section: memory related */
2387f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
2388f7784b8eSMarcelo Tosatti 				   struct kvm_memory_slot *memslot,
23897b6195a9STakuya Yoshikawa 				   struct kvm_userspace_memory_region *mem,
23907b6195a9STakuya Yoshikawa 				   enum kvm_mr_change change)
2391b0c632dbSHeiko Carstens {
2392dd2887e7SNick Wang 	/* A few sanity checks. Memory slots have to start and end at a
2393dd2887e7SNick Wang 	   segment boundary (1MB). The backing memory in userland may be
2394dd2887e7SNick Wang 	   fragmented into various different vmas, and it is fine to mmap()
2395dd2887e7SNick Wang 	   and munmap() memory in this slot at any time after this call. */
2396b0c632dbSHeiko Carstens 
2397598841caSCarsten Otte 	if (mem->userspace_addr & 0xffffful)
2398b0c632dbSHeiko Carstens 		return -EINVAL;
2399b0c632dbSHeiko Carstens 
2400598841caSCarsten Otte 	if (mem->memory_size & 0xffffful)
2401b0c632dbSHeiko Carstens 		return -EINVAL;
2402b0c632dbSHeiko Carstens 
2403f7784b8eSMarcelo Tosatti 	return 0;
2404f7784b8eSMarcelo Tosatti }
2405f7784b8eSMarcelo Tosatti 
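/*
 * Editorial sketch, not part of the original file: the alignment checks in
 * kvm_arch_prepare_memory_region() above mean that a slot registered via
 * KVM_SET_USER_MEMORY_REGION needs a userspace address and size that are
 * multiples of 1MB, e.g. a 256MB slot (error handling shortened; vm_fd is
 * assumed to be an open KVM VM file descriptor and mem a 1MB-aligned
 * allocation):
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = 256UL << 20,
 *		.userspace_addr  = (unsigned long)mem,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0)
 *		perror("KVM_SET_USER_MEMORY_REGION");
 */
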
2406f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
2407f7784b8eSMarcelo Tosatti 				struct kvm_userspace_memory_region *mem,
24088482644aSTakuya Yoshikawa 				const struct kvm_memory_slot *old,
24098482644aSTakuya Yoshikawa 				enum kvm_mr_change change)
2410f7784b8eSMarcelo Tosatti {
2411f7850c92SCarsten Otte 	int rc;
2412f7784b8eSMarcelo Tosatti 
24132cef4debSChristian Borntraeger 	/* If the basics of the memslot do not change, we do not want
24142cef4debSChristian Borntraeger 	 * to update the gmap. Every update causes several unnecessary
24152cef4debSChristian Borntraeger 	 * segment translation exceptions. This is usually handled just
24162cef4debSChristian Borntraeger 	 * fine by the normal fault handler + gmap, but it will also
24172cef4debSChristian Borntraeger 	 * cause faults on the prefix page of running guest CPUs.
24182cef4debSChristian Borntraeger 	 */
24192cef4debSChristian Borntraeger 	if (old->userspace_addr == mem->userspace_addr &&
24202cef4debSChristian Borntraeger 	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
24212cef4debSChristian Borntraeger 	    old->npages * PAGE_SIZE == mem->memory_size)
24222cef4debSChristian Borntraeger 		return;
2423598841caSCarsten Otte 
2424598841caSCarsten Otte 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2425598841caSCarsten Otte 		mem->guest_phys_addr, mem->memory_size);
2426598841caSCarsten Otte 	if (rc)
2427f7850c92SCarsten Otte 		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
2428598841caSCarsten Otte 	return;
2429b0c632dbSHeiko Carstens }
2430b0c632dbSHeiko Carstens 
2431b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
2432b0c632dbSHeiko Carstens {
24339d8d5786SMichael Mueller 	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
2434b0c632dbSHeiko Carstens }
2435b0c632dbSHeiko Carstens 
2436b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void)
2437b0c632dbSHeiko Carstens {
2438b0c632dbSHeiko Carstens 	kvm_exit();
2439b0c632dbSHeiko Carstens }
2440b0c632dbSHeiko Carstens 
2441b0c632dbSHeiko Carstens module_init(kvm_s390_init);
2442b0c632dbSHeiko Carstens module_exit(kvm_s390_exit);
2443566af940SCornelia Huck 
2444566af940SCornelia Huck /*
2445566af940SCornelia Huck  * Enable autoloading of the kvm module.
2446566af940SCornelia Huck  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2447566af940SCornelia Huck  * since x86 takes a different approach.
2448566af940SCornelia Huck  */
2449566af940SCornelia Huck #include <linux/miscdevice.h>
2450566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR);
2451566af940SCornelia Huck MODULE_ALIAS("devname:kvm");
2452