xref: /openbmc/linux/arch/s390/kvm/kvm-s390.c (revision bc17de7c966504b287a1dceb76a523d8b7816731)
1b0c632dbSHeiko Carstens /*
2a53c8fabSHeiko Carstens  * hosting zSeries kernel virtual machines
3b0c632dbSHeiko Carstens  *
4628eb9b8SChristian Ehrhardt  * Copyright IBM Corp. 2008, 2009
5b0c632dbSHeiko Carstens  *
6b0c632dbSHeiko Carstens  * This program is free software; you can redistribute it and/or modify
7b0c632dbSHeiko Carstens  * it under the terms of the GNU General Public License (version 2 only)
8b0c632dbSHeiko Carstens  * as published by the Free Software Foundation.
9b0c632dbSHeiko Carstens  *
10b0c632dbSHeiko Carstens  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11b0c632dbSHeiko Carstens  *               Christian Borntraeger <borntraeger@de.ibm.com>
12b0c632dbSHeiko Carstens  *               Heiko Carstens <heiko.carstens@de.ibm.com>
13628eb9b8SChristian Ehrhardt  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
1415f36ebdSJason J. Herne  *               Jason J. Herne <jjherne@us.ibm.com>
15b0c632dbSHeiko Carstens  */
16b0c632dbSHeiko Carstens 
17b0c632dbSHeiko Carstens #include <linux/compiler.h>
18b0c632dbSHeiko Carstens #include <linux/err.h>
19b0c632dbSHeiko Carstens #include <linux/fs.h>
20ca872302SChristian Borntraeger #include <linux/hrtimer.h>
21b0c632dbSHeiko Carstens #include <linux/init.h>
22b0c632dbSHeiko Carstens #include <linux/kvm.h>
23b0c632dbSHeiko Carstens #include <linux/kvm_host.h>
24b0c632dbSHeiko Carstens #include <linux/module.h>
25a374e892STony Krowiak #include <linux/random.h>
26b0c632dbSHeiko Carstens #include <linux/slab.h>
27ba5c1e9bSCarsten Otte #include <linux/timer.h>
28cbb870c8SHeiko Carstens #include <asm/asm-offsets.h>
29b0c632dbSHeiko Carstens #include <asm/lowcore.h>
30b0c632dbSHeiko Carstens #include <asm/pgtable.h>
31f5daba1dSHeiko Carstens #include <asm/nmi.h>
32a0616cdeSDavid Howells #include <asm/switch_to.h>
331526bf9cSChristian Borntraeger #include <asm/sclp.h>
348f2abe6aSChristian Borntraeger #include "kvm-s390.h"
35b0c632dbSHeiko Carstens #include "gaccess.h"
36b0c632dbSHeiko Carstens 
375786fffaSCornelia Huck #define CREATE_TRACE_POINTS
385786fffaSCornelia Huck #include "trace.h"
39ade38c31SCornelia Huck #include "trace-s390.h"
405786fffaSCornelia Huck 
41b0c632dbSHeiko Carstens #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
42b0c632dbSHeiko Carstens 
43b0c632dbSHeiko Carstens struct kvm_stats_debugfs_item debugfs_entries[] = {
44b0c632dbSHeiko Carstens 	{ "userspace_handled", VCPU_STAT(exit_userspace) },
450eaeafa1SChristian Borntraeger 	{ "exit_null", VCPU_STAT(exit_null) },
468f2abe6aSChristian Borntraeger 	{ "exit_validity", VCPU_STAT(exit_validity) },
478f2abe6aSChristian Borntraeger 	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
488f2abe6aSChristian Borntraeger 	{ "exit_external_request", VCPU_STAT(exit_external_request) },
498f2abe6aSChristian Borntraeger 	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
50ba5c1e9bSCarsten Otte 	{ "exit_instruction", VCPU_STAT(exit_instruction) },
51ba5c1e9bSCarsten Otte 	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
52ba5c1e9bSCarsten Otte 	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
53f7819512SPaolo Bonzini 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
54ce2e4f0bSDavid Hildenbrand 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
55f5e10b09SChristian Borntraeger 	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
56ba5c1e9bSCarsten Otte 	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
57aba07508SDavid Hildenbrand 	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
58aba07508SDavid Hildenbrand 	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
59ba5c1e9bSCarsten Otte 	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
607697e71fSChristian Ehrhardt 	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
61ba5c1e9bSCarsten Otte 	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
62ba5c1e9bSCarsten Otte 	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
63ba5c1e9bSCarsten Otte 	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
64ba5c1e9bSCarsten Otte 	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
65ba5c1e9bSCarsten Otte 	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
66ba5c1e9bSCarsten Otte 	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
67ba5c1e9bSCarsten Otte 	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
6869d0d3a3SChristian Borntraeger 	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
69453423dcSChristian Borntraeger 	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
70453423dcSChristian Borntraeger 	{ "instruction_spx", VCPU_STAT(instruction_spx) },
71453423dcSChristian Borntraeger 	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
72453423dcSChristian Borntraeger 	{ "instruction_stap", VCPU_STAT(instruction_stap) },
73453423dcSChristian Borntraeger 	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
748a242234SHeiko Carstens 	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
75453423dcSChristian Borntraeger 	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
76453423dcSChristian Borntraeger 	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
77b31288faSKonstantin Weitz 	{ "instruction_essa", VCPU_STAT(instruction_essa) },
78453423dcSChristian Borntraeger 	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
79453423dcSChristian Borntraeger 	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
80bb25b9baSChristian Borntraeger 	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
815288fbf0SChristian Borntraeger 	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
82bd59d3a4SCornelia Huck 	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
837697e71fSChristian Ehrhardt 	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
845288fbf0SChristian Borntraeger 	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
8542cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
8642cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
875288fbf0SChristian Borntraeger 	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
8842cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
8942cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
90cd7b4b61SEric Farman 	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
915288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
925288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
935288fbf0SChristian Borntraeger 	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
9442cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
9542cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
9642cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
97388186bcSChristian Borntraeger 	{ "diagnose_10", VCPU_STAT(diagnose_10) },
98e28acfeaSChristian Borntraeger 	{ "diagnose_44", VCPU_STAT(diagnose_44) },
9941628d33SKonstantin Weitz 	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
100b0c632dbSHeiko Carstens 	{ NULL }
101b0c632dbSHeiko Carstens };
102b0c632dbSHeiko Carstens 
1039d8d5786SMichael Mueller /* upper facilities limit for kvm */
1049d8d5786SMichael Mueller unsigned long kvm_s390_fac_list_mask[] = {
1059d8d5786SMichael Mueller 	0xff82fffbf4fc2000UL,
1069d8d5786SMichael Mueller 	0x005c000000000000UL,
1079d8d5786SMichael Mueller };
108b0c632dbSHeiko Carstens 
1099d8d5786SMichael Mueller unsigned long kvm_s390_fac_list_mask_size(void)
11078c4b59fSMichael Mueller {
1119d8d5786SMichael Mueller 	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
1129d8d5786SMichael Mueller 	return ARRAY_SIZE(kvm_s390_fac_list_mask);
11378c4b59fSMichael Mueller }
11478c4b59fSMichael Mueller 
1159d8d5786SMichael Mueller static struct gmap_notifier gmap_notifier;
1169d8d5786SMichael Mueller 
117b0c632dbSHeiko Carstens /* Section: not file related */
11813a34e06SRadim Krčmář int kvm_arch_hardware_enable(void)
119b0c632dbSHeiko Carstens {
120b0c632dbSHeiko Carstens 	/* every s390 is virtualization enabled ;-) */
12110474ae8SAlexander Graf 	return 0;
122b0c632dbSHeiko Carstens }
123b0c632dbSHeiko Carstens 
1242c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
1252c70fe44SChristian Borntraeger 
126b0c632dbSHeiko Carstens int kvm_arch_hardware_setup(void)
127b0c632dbSHeiko Carstens {
1282c70fe44SChristian Borntraeger 	gmap_notifier.notifier_call = kvm_gmap_notifier;
1292c70fe44SChristian Borntraeger 	gmap_register_ipte_notifier(&gmap_notifier);
130b0c632dbSHeiko Carstens 	return 0;
131b0c632dbSHeiko Carstens }
132b0c632dbSHeiko Carstens 
133b0c632dbSHeiko Carstens void kvm_arch_hardware_unsetup(void)
134b0c632dbSHeiko Carstens {
1352c70fe44SChristian Borntraeger 	gmap_unregister_ipte_notifier(&gmap_notifier);
136b0c632dbSHeiko Carstens }
137b0c632dbSHeiko Carstens 
138b0c632dbSHeiko Carstens int kvm_arch_init(void *opaque)
139b0c632dbSHeiko Carstens {
14084877d93SCornelia Huck 	/* Register floating interrupt controller interface. */
14184877d93SCornelia Huck 	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
142b0c632dbSHeiko Carstens }
143b0c632dbSHeiko Carstens 
144b0c632dbSHeiko Carstens /* Section: device related */
145b0c632dbSHeiko Carstens long kvm_arch_dev_ioctl(struct file *filp,
146b0c632dbSHeiko Carstens 			unsigned int ioctl, unsigned long arg)
147b0c632dbSHeiko Carstens {
148b0c632dbSHeiko Carstens 	if (ioctl == KVM_S390_ENABLE_SIE)
149b0c632dbSHeiko Carstens 		return s390_enable_sie();
150b0c632dbSHeiko Carstens 	return -EINVAL;
151b0c632dbSHeiko Carstens }
152b0c632dbSHeiko Carstens 
153784aa3d7SAlexander Graf int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
154b0c632dbSHeiko Carstens {
155d7b0b5ebSCarsten Otte 	int r;
156d7b0b5ebSCarsten Otte 
1572bd0ac4eSCarsten Otte 	switch (ext) {
158d7b0b5ebSCarsten Otte 	case KVM_CAP_S390_PSW:
159b6cf8788SChristian Borntraeger 	case KVM_CAP_S390_GMAP:
16052e16b18SChristian Borntraeger 	case KVM_CAP_SYNC_MMU:
1611efd0f59SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
1621efd0f59SCarsten Otte 	case KVM_CAP_S390_UCONTROL:
1631efd0f59SCarsten Otte #endif
1643c038e6bSDominik Dingel 	case KVM_CAP_ASYNC_PF:
16560b413c9SChristian Borntraeger 	case KVM_CAP_SYNC_REGS:
16614eebd91SCarsten Otte 	case KVM_CAP_ONE_REG:
167d6712df9SCornelia Huck 	case KVM_CAP_ENABLE_CAP:
168fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
169ebc32262SCornelia Huck 	case KVM_CAP_IRQFD:
17010ccaa1eSCornelia Huck 	case KVM_CAP_IOEVENTFD:
171c05c4186SJens Freimann 	case KVM_CAP_DEVICE_CTRL:
172d938dc55SCornelia Huck 	case KVM_CAP_ENABLE_CAP_VM:
17378599d90SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
174f2061656SDominik Dingel 	case KVM_CAP_VM_ATTRIBUTES:
1756352e4d2SDavid Hildenbrand 	case KVM_CAP_MP_STATE:
1762444b352SDavid Hildenbrand 	case KVM_CAP_S390_USER_SIGP:
177d7b0b5ebSCarsten Otte 		r = 1;
178d7b0b5ebSCarsten Otte 		break;
179e726b1bdSChristian Borntraeger 	case KVM_CAP_NR_VCPUS:
180e726b1bdSChristian Borntraeger 	case KVM_CAP_MAX_VCPUS:
181e726b1bdSChristian Borntraeger 		r = KVM_MAX_VCPUS;
182e726b1bdSChristian Borntraeger 		break;
183e1e2e605SNick Wang 	case KVM_CAP_NR_MEMSLOTS:
184e1e2e605SNick Wang 		r = KVM_USER_MEM_SLOTS;
185e1e2e605SNick Wang 		break;
1861526bf9cSChristian Borntraeger 	case KVM_CAP_S390_COW:
187abf09bedSMartin Schwidefsky 		r = MACHINE_HAS_ESOP;
1881526bf9cSChristian Borntraeger 		break;
18968c55750SEric Farman 	case KVM_CAP_S390_VECTOR_REGISTERS:
19068c55750SEric Farman 		r = MACHINE_HAS_VX;
19168c55750SEric Farman 		break;
1922bd0ac4eSCarsten Otte 	default:
193d7b0b5ebSCarsten Otte 		r = 0;
194b0c632dbSHeiko Carstens 	}
195d7b0b5ebSCarsten Otte 	return r;
1962bd0ac4eSCarsten Otte }
197b0c632dbSHeiko Carstens 
19815f36ebdSJason J. Herne static void kvm_s390_sync_dirty_log(struct kvm *kvm,
19915f36ebdSJason J. Herne 					struct kvm_memory_slot *memslot)
20015f36ebdSJason J. Herne {
20115f36ebdSJason J. Herne 	gfn_t cur_gfn, last_gfn;
20215f36ebdSJason J. Herne 	unsigned long address;
20315f36ebdSJason J. Herne 	struct gmap *gmap = kvm->arch.gmap;
20415f36ebdSJason J. Herne 
20515f36ebdSJason J. Herne 	down_read(&gmap->mm->mmap_sem);
20615f36ebdSJason J. Herne 	/* Loop over all guest pages */
20715f36ebdSJason J. Herne 	last_gfn = memslot->base_gfn + memslot->npages;
20815f36ebdSJason J. Herne 	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
20915f36ebdSJason J. Herne 		address = gfn_to_hva_memslot(memslot, cur_gfn);
21015f36ebdSJason J. Herne 
21115f36ebdSJason J. Herne 		if (gmap_test_and_clear_dirty(address, gmap))
21215f36ebdSJason J. Herne 			mark_page_dirty(kvm, cur_gfn);
21315f36ebdSJason J. Herne 	}
21415f36ebdSJason J. Herne 	up_read(&gmap->mm->mmap_sem);
21515f36ebdSJason J. Herne }
21615f36ebdSJason J. Herne 
217b0c632dbSHeiko Carstens /* Section: vm related */
218b0c632dbSHeiko Carstens /*
219b0c632dbSHeiko Carstens  * Get (and clear) the dirty memory log for a memory slot.
220b0c632dbSHeiko Carstens  */
221b0c632dbSHeiko Carstens int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
222b0c632dbSHeiko Carstens 			       struct kvm_dirty_log *log)
223b0c632dbSHeiko Carstens {
22415f36ebdSJason J. Herne 	int r;
22515f36ebdSJason J. Herne 	unsigned long n;
22615f36ebdSJason J. Herne 	struct kvm_memory_slot *memslot;
22715f36ebdSJason J. Herne 	int is_dirty = 0;
22815f36ebdSJason J. Herne 
22915f36ebdSJason J. Herne 	mutex_lock(&kvm->slots_lock);
23015f36ebdSJason J. Herne 
23115f36ebdSJason J. Herne 	r = -EINVAL;
23215f36ebdSJason J. Herne 	if (log->slot >= KVM_USER_MEM_SLOTS)
23315f36ebdSJason J. Herne 		goto out;
23415f36ebdSJason J. Herne 
23515f36ebdSJason J. Herne 	memslot = id_to_memslot(kvm->memslots, log->slot);
23615f36ebdSJason J. Herne 	r = -ENOENT;
23715f36ebdSJason J. Herne 	if (!memslot->dirty_bitmap)
23815f36ebdSJason J. Herne 		goto out;
23915f36ebdSJason J. Herne 
24015f36ebdSJason J. Herne 	kvm_s390_sync_dirty_log(kvm, memslot);
24115f36ebdSJason J. Herne 	r = kvm_get_dirty_log(kvm, log, &is_dirty);
24215f36ebdSJason J. Herne 	if (r)
24315f36ebdSJason J. Herne 		goto out;
24415f36ebdSJason J. Herne 
24515f36ebdSJason J. Herne 	/* Clear the dirty log */
24615f36ebdSJason J. Herne 	if (is_dirty) {
24715f36ebdSJason J. Herne 		n = kvm_dirty_bitmap_bytes(memslot);
24815f36ebdSJason J. Herne 		memset(memslot->dirty_bitmap, 0, n);
24915f36ebdSJason J. Herne 	}
25015f36ebdSJason J. Herne 	r = 0;
25115f36ebdSJason J. Herne out:
25215f36ebdSJason J. Herne 	mutex_unlock(&kvm->slots_lock);
25315f36ebdSJason J. Herne 	return r;
254b0c632dbSHeiko Carstens }
255b0c632dbSHeiko Carstens 
256d938dc55SCornelia Huck static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
257d938dc55SCornelia Huck {
258d938dc55SCornelia Huck 	int r;
259d938dc55SCornelia Huck 
260d938dc55SCornelia Huck 	if (cap->flags)
261d938dc55SCornelia Huck 		return -EINVAL;
262d938dc55SCornelia Huck 
263d938dc55SCornelia Huck 	switch (cap->cap) {
26484223598SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
26584223598SCornelia Huck 		kvm->arch.use_irqchip = 1;
26684223598SCornelia Huck 		r = 0;
26784223598SCornelia Huck 		break;
2682444b352SDavid Hildenbrand 	case KVM_CAP_S390_USER_SIGP:
2692444b352SDavid Hildenbrand 		kvm->arch.user_sigp = 1;
2702444b352SDavid Hildenbrand 		r = 0;
2712444b352SDavid Hildenbrand 		break;
27268c55750SEric Farman 	case KVM_CAP_S390_VECTOR_REGISTERS:
27368c55750SEric Farman 		kvm->arch.use_vectors = MACHINE_HAS_VX;
27468c55750SEric Farman 		r = MACHINE_HAS_VX ? 0 : -EINVAL;
27568c55750SEric Farman 		break;
276d938dc55SCornelia Huck 	default:
277d938dc55SCornelia Huck 		r = -EINVAL;
278d938dc55SCornelia Huck 		break;
279d938dc55SCornelia Huck 	}
280d938dc55SCornelia Huck 	return r;
281d938dc55SCornelia Huck }
282d938dc55SCornelia Huck 
2838c0a7ce6SDominik Dingel static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
2848c0a7ce6SDominik Dingel {
2858c0a7ce6SDominik Dingel 	int ret;
2868c0a7ce6SDominik Dingel 
2878c0a7ce6SDominik Dingel 	switch (attr->attr) {
2888c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE:
2898c0a7ce6SDominik Dingel 		ret = 0;
2908c0a7ce6SDominik Dingel 		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
2918c0a7ce6SDominik Dingel 			ret = -EFAULT;
2928c0a7ce6SDominik Dingel 		break;
2938c0a7ce6SDominik Dingel 	default:
2948c0a7ce6SDominik Dingel 		ret = -ENXIO;
2958c0a7ce6SDominik Dingel 		break;
2968c0a7ce6SDominik Dingel 	}
2978c0a7ce6SDominik Dingel 	return ret;
2988c0a7ce6SDominik Dingel }
2998c0a7ce6SDominik Dingel 
3008c0a7ce6SDominik Dingel static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
3014f718eabSDominik Dingel {
3024f718eabSDominik Dingel 	int ret;
3034f718eabSDominik Dingel 	unsigned int idx;
3044f718eabSDominik Dingel 	switch (attr->attr) {
3054f718eabSDominik Dingel 	case KVM_S390_VM_MEM_ENABLE_CMMA:
3064f718eabSDominik Dingel 		ret = -EBUSY;
3074f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
3084f718eabSDominik Dingel 		if (atomic_read(&kvm->online_vcpus) == 0) {
3094f718eabSDominik Dingel 			kvm->arch.use_cmma = 1;
3104f718eabSDominik Dingel 			ret = 0;
3114f718eabSDominik Dingel 		}
3124f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
3134f718eabSDominik Dingel 		break;
3144f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CLR_CMMA:
3154f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
3164f718eabSDominik Dingel 		idx = srcu_read_lock(&kvm->srcu);
317a13cff31SDominik Dingel 		s390_reset_cmma(kvm->arch.gmap->mm);
3184f718eabSDominik Dingel 		srcu_read_unlock(&kvm->srcu, idx);
3194f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
3204f718eabSDominik Dingel 		ret = 0;
3214f718eabSDominik Dingel 		break;
3228c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE: {
3238c0a7ce6SDominik Dingel 		unsigned long new_limit;
3248c0a7ce6SDominik Dingel 
3258c0a7ce6SDominik Dingel 		if (kvm_is_ucontrol(kvm))
3268c0a7ce6SDominik Dingel 			return -EINVAL;
3278c0a7ce6SDominik Dingel 
3288c0a7ce6SDominik Dingel 		if (get_user(new_limit, (u64 __user *)attr->addr))
3298c0a7ce6SDominik Dingel 			return -EFAULT;
3308c0a7ce6SDominik Dingel 
3318c0a7ce6SDominik Dingel 		if (new_limit > kvm->arch.gmap->asce_end)
3328c0a7ce6SDominik Dingel 			return -E2BIG;
3338c0a7ce6SDominik Dingel 
3348c0a7ce6SDominik Dingel 		ret = -EBUSY;
3358c0a7ce6SDominik Dingel 		mutex_lock(&kvm->lock);
3368c0a7ce6SDominik Dingel 		if (atomic_read(&kvm->online_vcpus) == 0) {
3378c0a7ce6SDominik Dingel 			/* gmap_alloc will round the limit up */
3388c0a7ce6SDominik Dingel 			struct gmap *new = gmap_alloc(current->mm, new_limit);
3398c0a7ce6SDominik Dingel 
3408c0a7ce6SDominik Dingel 			if (!new) {
3418c0a7ce6SDominik Dingel 				ret = -ENOMEM;
3428c0a7ce6SDominik Dingel 			} else {
3438c0a7ce6SDominik Dingel 				gmap_free(kvm->arch.gmap);
3448c0a7ce6SDominik Dingel 				new->private = kvm;
3458c0a7ce6SDominik Dingel 				kvm->arch.gmap = new;
3468c0a7ce6SDominik Dingel 				ret = 0;
3478c0a7ce6SDominik Dingel 			}
3488c0a7ce6SDominik Dingel 		}
3498c0a7ce6SDominik Dingel 		mutex_unlock(&kvm->lock);
3508c0a7ce6SDominik Dingel 		break;
3518c0a7ce6SDominik Dingel 	}
3524f718eabSDominik Dingel 	default:
3534f718eabSDominik Dingel 		ret = -ENXIO;
3544f718eabSDominik Dingel 		break;
3554f718eabSDominik Dingel 	}
3564f718eabSDominik Dingel 	return ret;
3574f718eabSDominik Dingel }
3584f718eabSDominik Dingel 
359a374e892STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
360a374e892STony Krowiak 
361a374e892STony Krowiak static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
362a374e892STony Krowiak {
363a374e892STony Krowiak 	struct kvm_vcpu *vcpu;
364a374e892STony Krowiak 	int i;
365a374e892STony Krowiak 
3669d8d5786SMichael Mueller 	if (!test_kvm_facility(kvm, 76))
367a374e892STony Krowiak 		return -EINVAL;
368a374e892STony Krowiak 
369a374e892STony Krowiak 	mutex_lock(&kvm->lock);
370a374e892STony Krowiak 	switch (attr->attr) {
371a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
372a374e892STony Krowiak 		get_random_bytes(
373a374e892STony Krowiak 			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
374a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
375a374e892STony Krowiak 		kvm->arch.crypto.aes_kw = 1;
376a374e892STony Krowiak 		break;
377a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
378a374e892STony Krowiak 		get_random_bytes(
379a374e892STony Krowiak 			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
380a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
381a374e892STony Krowiak 		kvm->arch.crypto.dea_kw = 1;
382a374e892STony Krowiak 		break;
383a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
384a374e892STony Krowiak 		kvm->arch.crypto.aes_kw = 0;
385a374e892STony Krowiak 		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
386a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
387a374e892STony Krowiak 		break;
388a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
389a374e892STony Krowiak 		kvm->arch.crypto.dea_kw = 0;
390a374e892STony Krowiak 		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
391a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
392a374e892STony Krowiak 		break;
393a374e892STony Krowiak 	default:
394a374e892STony Krowiak 		mutex_unlock(&kvm->lock);
395a374e892STony Krowiak 		return -ENXIO;
396a374e892STony Krowiak 	}
397a374e892STony Krowiak 
398a374e892STony Krowiak 	kvm_for_each_vcpu(i, vcpu, kvm) {
399a374e892STony Krowiak 		kvm_s390_vcpu_crypto_setup(vcpu);
400a374e892STony Krowiak 		exit_sie(vcpu);
401a374e892STony Krowiak 	}
402a374e892STony Krowiak 	mutex_unlock(&kvm->lock);
403a374e892STony Krowiak 	return 0;
404a374e892STony Krowiak }
405a374e892STony Krowiak 
40672f25020SJason J. Herne static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
40772f25020SJason J. Herne {
40872f25020SJason J. Herne 	u8 gtod_high;
40972f25020SJason J. Herne 
41072f25020SJason J. Herne 	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
41172f25020SJason J. Herne 					   sizeof(gtod_high)))
41272f25020SJason J. Herne 		return -EFAULT;
41372f25020SJason J. Herne 
41472f25020SJason J. Herne 	if (gtod_high != 0)
41572f25020SJason J. Herne 		return -EINVAL;
41672f25020SJason J. Herne 
41772f25020SJason J. Herne 	return 0;
41872f25020SJason J. Herne }
41972f25020SJason J. Herne 
42072f25020SJason J. Herne static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
42172f25020SJason J. Herne {
42272f25020SJason J. Herne 	struct kvm_vcpu *cur_vcpu;
42372f25020SJason J. Herne 	unsigned int vcpu_idx;
42472f25020SJason J. Herne 	u64 host_tod, gtod;
42572f25020SJason J. Herne 	int r;
42672f25020SJason J. Herne 
42772f25020SJason J. Herne 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
42872f25020SJason J. Herne 		return -EFAULT;
42972f25020SJason J. Herne 
43072f25020SJason J. Herne 	r = store_tod_clock(&host_tod);
43172f25020SJason J. Herne 	if (r)
43272f25020SJason J. Herne 		return r;
43372f25020SJason J. Herne 
43472f25020SJason J. Herne 	mutex_lock(&kvm->lock);
43572f25020SJason J. Herne 	kvm->arch.epoch = gtod - host_tod;
43672f25020SJason J. Herne 	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
43772f25020SJason J. Herne 		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
43872f25020SJason J. Herne 		exit_sie(cur_vcpu);
43972f25020SJason J. Herne 	}
44072f25020SJason J. Herne 	mutex_unlock(&kvm->lock);
44172f25020SJason J. Herne 	return 0;
44272f25020SJason J. Herne }
44372f25020SJason J. Herne 
44472f25020SJason J. Herne static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
44572f25020SJason J. Herne {
44672f25020SJason J. Herne 	int ret;
44772f25020SJason J. Herne 
44872f25020SJason J. Herne 	if (attr->flags)
44972f25020SJason J. Herne 		return -EINVAL;
45072f25020SJason J. Herne 
45172f25020SJason J. Herne 	switch (attr->attr) {
45272f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
45372f25020SJason J. Herne 		ret = kvm_s390_set_tod_high(kvm, attr);
45472f25020SJason J. Herne 		break;
45572f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
45672f25020SJason J. Herne 		ret = kvm_s390_set_tod_low(kvm, attr);
45772f25020SJason J. Herne 		break;
45872f25020SJason J. Herne 	default:
45972f25020SJason J. Herne 		ret = -ENXIO;
46072f25020SJason J. Herne 		break;
46172f25020SJason J. Herne 	}
46272f25020SJason J. Herne 	return ret;
46372f25020SJason J. Herne }
46472f25020SJason J. Herne 
46572f25020SJason J. Herne static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
46672f25020SJason J. Herne {
46772f25020SJason J. Herne 	u8 gtod_high = 0;
46872f25020SJason J. Herne 
46972f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod_high,
47072f25020SJason J. Herne 					 sizeof(gtod_high)))
47172f25020SJason J. Herne 		return -EFAULT;
47272f25020SJason J. Herne 
47372f25020SJason J. Herne 	return 0;
47472f25020SJason J. Herne }
47572f25020SJason J. Herne 
47672f25020SJason J. Herne static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
47772f25020SJason J. Herne {
47872f25020SJason J. Herne 	u64 host_tod, gtod;
47972f25020SJason J. Herne 	int r;
48072f25020SJason J. Herne 
48172f25020SJason J. Herne 	r = store_tod_clock(&host_tod);
48272f25020SJason J. Herne 	if (r)
48372f25020SJason J. Herne 		return r;
48472f25020SJason J. Herne 
48572f25020SJason J. Herne 	gtod = host_tod + kvm->arch.epoch;
48672f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
48772f25020SJason J. Herne 		return -EFAULT;
48872f25020SJason J. Herne 
48972f25020SJason J. Herne 	return 0;
49072f25020SJason J. Herne }
49172f25020SJason J. Herne 
49272f25020SJason J. Herne static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
49372f25020SJason J. Herne {
49472f25020SJason J. Herne 	int ret;
49572f25020SJason J. Herne 
49672f25020SJason J. Herne 	if (attr->flags)
49772f25020SJason J. Herne 		return -EINVAL;
49872f25020SJason J. Herne 
49972f25020SJason J. Herne 	switch (attr->attr) {
50072f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
50172f25020SJason J. Herne 		ret = kvm_s390_get_tod_high(kvm, attr);
50272f25020SJason J. Herne 		break;
50372f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
50472f25020SJason J. Herne 		ret = kvm_s390_get_tod_low(kvm, attr);
50572f25020SJason J. Herne 		break;
50672f25020SJason J. Herne 	default:
50772f25020SJason J. Herne 		ret = -ENXIO;
50872f25020SJason J. Herne 		break;
50972f25020SJason J. Herne 	}
51072f25020SJason J. Herne 	return ret;
51172f25020SJason J. Herne }
51272f25020SJason J. Herne 
513658b6edaSMichael Mueller static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
514658b6edaSMichael Mueller {
515658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_processor *proc;
516658b6edaSMichael Mueller 	int ret = 0;
517658b6edaSMichael Mueller 
518658b6edaSMichael Mueller 	mutex_lock(&kvm->lock);
519658b6edaSMichael Mueller 	if (atomic_read(&kvm->online_vcpus)) {
520658b6edaSMichael Mueller 		ret = -EBUSY;
521658b6edaSMichael Mueller 		goto out;
522658b6edaSMichael Mueller 	}
523658b6edaSMichael Mueller 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
524658b6edaSMichael Mueller 	if (!proc) {
525658b6edaSMichael Mueller 		ret = -ENOMEM;
526658b6edaSMichael Mueller 		goto out;
527658b6edaSMichael Mueller 	}
528658b6edaSMichael Mueller 	if (!copy_from_user(proc, (void __user *)attr->addr,
529658b6edaSMichael Mueller 			    sizeof(*proc))) {
530658b6edaSMichael Mueller 		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
531658b6edaSMichael Mueller 		       sizeof(struct cpuid));
532658b6edaSMichael Mueller 		kvm->arch.model.ibc = proc->ibc;
533981467c9SMichael Mueller 		memcpy(kvm->arch.model.fac->list, proc->fac_list,
534658b6edaSMichael Mueller 		       S390_ARCH_FAC_LIST_SIZE_BYTE);
535658b6edaSMichael Mueller 	} else
536658b6edaSMichael Mueller 		ret = -EFAULT;
537658b6edaSMichael Mueller 	kfree(proc);
538658b6edaSMichael Mueller out:
539658b6edaSMichael Mueller 	mutex_unlock(&kvm->lock);
540658b6edaSMichael Mueller 	return ret;
541658b6edaSMichael Mueller }
542658b6edaSMichael Mueller 
543658b6edaSMichael Mueller static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
544658b6edaSMichael Mueller {
545658b6edaSMichael Mueller 	int ret = -ENXIO;
546658b6edaSMichael Mueller 
547658b6edaSMichael Mueller 	switch (attr->attr) {
548658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
549658b6edaSMichael Mueller 		ret = kvm_s390_set_processor(kvm, attr);
550658b6edaSMichael Mueller 		break;
551658b6edaSMichael Mueller 	}
552658b6edaSMichael Mueller 	return ret;
553658b6edaSMichael Mueller }
554658b6edaSMichael Mueller 
555658b6edaSMichael Mueller static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
556658b6edaSMichael Mueller {
557658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_processor *proc;
558658b6edaSMichael Mueller 	int ret = 0;
559658b6edaSMichael Mueller 
560658b6edaSMichael Mueller 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
561658b6edaSMichael Mueller 	if (!proc) {
562658b6edaSMichael Mueller 		ret = -ENOMEM;
563658b6edaSMichael Mueller 		goto out;
564658b6edaSMichael Mueller 	}
565658b6edaSMichael Mueller 	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
566658b6edaSMichael Mueller 	proc->ibc = kvm->arch.model.ibc;
567981467c9SMichael Mueller 	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
568658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
569658b6edaSMichael Mueller 		ret = -EFAULT;
570658b6edaSMichael Mueller 	kfree(proc);
571658b6edaSMichael Mueller out:
572658b6edaSMichael Mueller 	return ret;
573658b6edaSMichael Mueller }
574658b6edaSMichael Mueller 
575658b6edaSMichael Mueller static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
576658b6edaSMichael Mueller {
577658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_machine *mach;
578658b6edaSMichael Mueller 	int ret = 0;
579658b6edaSMichael Mueller 
580658b6edaSMichael Mueller 	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
581658b6edaSMichael Mueller 	if (!mach) {
582658b6edaSMichael Mueller 		ret = -ENOMEM;
583658b6edaSMichael Mueller 		goto out;
584658b6edaSMichael Mueller 	}
585658b6edaSMichael Mueller 	get_cpu_id((struct cpuid *) &mach->cpuid);
586658b6edaSMichael Mueller 	mach->ibc = sclp_get_ibc();
587981467c9SMichael Mueller 	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
588981467c9SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
589658b6edaSMichael Mueller 	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
59094422ee8SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
591658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
592658b6edaSMichael Mueller 		ret = -EFAULT;
593658b6edaSMichael Mueller 	kfree(mach);
594658b6edaSMichael Mueller out:
595658b6edaSMichael Mueller 	return ret;
596658b6edaSMichael Mueller }
597658b6edaSMichael Mueller 
598658b6edaSMichael Mueller static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
599658b6edaSMichael Mueller {
600658b6edaSMichael Mueller 	int ret = -ENXIO;
601658b6edaSMichael Mueller 
602658b6edaSMichael Mueller 	switch (attr->attr) {
603658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
604658b6edaSMichael Mueller 		ret = kvm_s390_get_processor(kvm, attr);
605658b6edaSMichael Mueller 		break;
606658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MACHINE:
607658b6edaSMichael Mueller 		ret = kvm_s390_get_machine(kvm, attr);
608658b6edaSMichael Mueller 		break;
609658b6edaSMichael Mueller 	}
610658b6edaSMichael Mueller 	return ret;
611658b6edaSMichael Mueller }
612658b6edaSMichael Mueller 
613f2061656SDominik Dingel static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
614f2061656SDominik Dingel {
615f2061656SDominik Dingel 	int ret;
616f2061656SDominik Dingel 
617f2061656SDominik Dingel 	switch (attr->group) {
6184f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
6198c0a7ce6SDominik Dingel 		ret = kvm_s390_set_mem_control(kvm, attr);
6204f718eabSDominik Dingel 		break;
62172f25020SJason J. Herne 	case KVM_S390_VM_TOD:
62272f25020SJason J. Herne 		ret = kvm_s390_set_tod(kvm, attr);
62372f25020SJason J. Herne 		break;
624658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
625658b6edaSMichael Mueller 		ret = kvm_s390_set_cpu_model(kvm, attr);
626658b6edaSMichael Mueller 		break;
627a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO:
628a374e892STony Krowiak 		ret = kvm_s390_vm_set_crypto(kvm, attr);
629a374e892STony Krowiak 		break;
630f2061656SDominik Dingel 	default:
631f2061656SDominik Dingel 		ret = -ENXIO;
632f2061656SDominik Dingel 		break;
633f2061656SDominik Dingel 	}
634f2061656SDominik Dingel 
635f2061656SDominik Dingel 	return ret;
636f2061656SDominik Dingel }
637f2061656SDominik Dingel 
638f2061656SDominik Dingel static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
639f2061656SDominik Dingel {
6408c0a7ce6SDominik Dingel 	int ret;
6418c0a7ce6SDominik Dingel 
6428c0a7ce6SDominik Dingel 	switch (attr->group) {
6438c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
6448c0a7ce6SDominik Dingel 		ret = kvm_s390_get_mem_control(kvm, attr);
6458c0a7ce6SDominik Dingel 		break;
64672f25020SJason J. Herne 	case KVM_S390_VM_TOD:
64772f25020SJason J. Herne 		ret = kvm_s390_get_tod(kvm, attr);
64872f25020SJason J. Herne 		break;
649658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
650658b6edaSMichael Mueller 		ret = kvm_s390_get_cpu_model(kvm, attr);
651658b6edaSMichael Mueller 		break;
6528c0a7ce6SDominik Dingel 	default:
6538c0a7ce6SDominik Dingel 		ret = -ENXIO;
6548c0a7ce6SDominik Dingel 		break;
6558c0a7ce6SDominik Dingel 	}
6568c0a7ce6SDominik Dingel 
6578c0a7ce6SDominik Dingel 	return ret;
658f2061656SDominik Dingel }
659f2061656SDominik Dingel 
660f2061656SDominik Dingel static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
661f2061656SDominik Dingel {
662f2061656SDominik Dingel 	int ret;
663f2061656SDominik Dingel 
664f2061656SDominik Dingel 	switch (attr->group) {
6654f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
6664f718eabSDominik Dingel 		switch (attr->attr) {
6674f718eabSDominik Dingel 		case KVM_S390_VM_MEM_ENABLE_CMMA:
6684f718eabSDominik Dingel 		case KVM_S390_VM_MEM_CLR_CMMA:
6698c0a7ce6SDominik Dingel 		case KVM_S390_VM_MEM_LIMIT_SIZE:
6704f718eabSDominik Dingel 			ret = 0;
6714f718eabSDominik Dingel 			break;
6724f718eabSDominik Dingel 		default:
6734f718eabSDominik Dingel 			ret = -ENXIO;
6744f718eabSDominik Dingel 			break;
6754f718eabSDominik Dingel 		}
6764f718eabSDominik Dingel 		break;
67772f25020SJason J. Herne 	case KVM_S390_VM_TOD:
67872f25020SJason J. Herne 		switch (attr->attr) {
67972f25020SJason J. Herne 		case KVM_S390_VM_TOD_LOW:
68072f25020SJason J. Herne 		case KVM_S390_VM_TOD_HIGH:
68172f25020SJason J. Herne 			ret = 0;
68272f25020SJason J. Herne 			break;
68372f25020SJason J. Herne 		default:
68472f25020SJason J. Herne 			ret = -ENXIO;
68572f25020SJason J. Herne 			break;
68672f25020SJason J. Herne 		}
68772f25020SJason J. Herne 		break;
688658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
689658b6edaSMichael Mueller 		switch (attr->attr) {
690658b6edaSMichael Mueller 		case KVM_S390_VM_CPU_PROCESSOR:
691658b6edaSMichael Mueller 		case KVM_S390_VM_CPU_MACHINE:
692658b6edaSMichael Mueller 			ret = 0;
693658b6edaSMichael Mueller 			break;
694658b6edaSMichael Mueller 		default:
695658b6edaSMichael Mueller 			ret = -ENXIO;
696658b6edaSMichael Mueller 			break;
697658b6edaSMichael Mueller 		}
698658b6edaSMichael Mueller 		break;
699a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO:
700a374e892STony Krowiak 		switch (attr->attr) {
701a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
702a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
703a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
704a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
705a374e892STony Krowiak 			ret = 0;
706a374e892STony Krowiak 			break;
707a374e892STony Krowiak 		default:
708a374e892STony Krowiak 			ret = -ENXIO;
709a374e892STony Krowiak 			break;
710a374e892STony Krowiak 		}
711a374e892STony Krowiak 		break;
712f2061656SDominik Dingel 	default:
713f2061656SDominik Dingel 		ret = -ENXIO;
714f2061656SDominik Dingel 		break;
715f2061656SDominik Dingel 	}
716f2061656SDominik Dingel 
717f2061656SDominik Dingel 	return ret;
718f2061656SDominik Dingel }
719f2061656SDominik Dingel 
720b0c632dbSHeiko Carstens long kvm_arch_vm_ioctl(struct file *filp,
721b0c632dbSHeiko Carstens 		       unsigned int ioctl, unsigned long arg)
722b0c632dbSHeiko Carstens {
723b0c632dbSHeiko Carstens 	struct kvm *kvm = filp->private_data;
724b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
725f2061656SDominik Dingel 	struct kvm_device_attr attr;
726b0c632dbSHeiko Carstens 	int r;
727b0c632dbSHeiko Carstens 
728b0c632dbSHeiko Carstens 	switch (ioctl) {
729ba5c1e9bSCarsten Otte 	case KVM_S390_INTERRUPT: {
730ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
731ba5c1e9bSCarsten Otte 
732ba5c1e9bSCarsten Otte 		r = -EFAULT;
733ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
734ba5c1e9bSCarsten Otte 			break;
735ba5c1e9bSCarsten Otte 		r = kvm_s390_inject_vm(kvm, &s390int);
736ba5c1e9bSCarsten Otte 		break;
737ba5c1e9bSCarsten Otte 	}
738d938dc55SCornelia Huck 	case KVM_ENABLE_CAP: {
739d938dc55SCornelia Huck 		struct kvm_enable_cap cap;
740d938dc55SCornelia Huck 		r = -EFAULT;
741d938dc55SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
742d938dc55SCornelia Huck 			break;
743d938dc55SCornelia Huck 		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
744d938dc55SCornelia Huck 		break;
745d938dc55SCornelia Huck 	}
74684223598SCornelia Huck 	case KVM_CREATE_IRQCHIP: {
74784223598SCornelia Huck 		struct kvm_irq_routing_entry routing;
74884223598SCornelia Huck 
74984223598SCornelia Huck 		r = -EINVAL;
75084223598SCornelia Huck 		if (kvm->arch.use_irqchip) {
75184223598SCornelia Huck 			/* Set up dummy routing. */
75284223598SCornelia Huck 			memset(&routing, 0, sizeof(routing));
75384223598SCornelia Huck 			kvm_set_irq_routing(kvm, &routing, 0, 0);
75484223598SCornelia Huck 			r = 0;
75584223598SCornelia Huck 		}
75684223598SCornelia Huck 		break;
75784223598SCornelia Huck 	}
758f2061656SDominik Dingel 	case KVM_SET_DEVICE_ATTR: {
759f2061656SDominik Dingel 		r = -EFAULT;
760f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
761f2061656SDominik Dingel 			break;
762f2061656SDominik Dingel 		r = kvm_s390_vm_set_attr(kvm, &attr);
763f2061656SDominik Dingel 		break;
764f2061656SDominik Dingel 	}
765f2061656SDominik Dingel 	case KVM_GET_DEVICE_ATTR: {
766f2061656SDominik Dingel 		r = -EFAULT;
767f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
768f2061656SDominik Dingel 			break;
769f2061656SDominik Dingel 		r = kvm_s390_vm_get_attr(kvm, &attr);
770f2061656SDominik Dingel 		break;
771f2061656SDominik Dingel 	}
772f2061656SDominik Dingel 	case KVM_HAS_DEVICE_ATTR: {
773f2061656SDominik Dingel 		r = -EFAULT;
774f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
775f2061656SDominik Dingel 			break;
776f2061656SDominik Dingel 		r = kvm_s390_vm_has_attr(kvm, &attr);
777f2061656SDominik Dingel 		break;
778f2061656SDominik Dingel 	}
779b0c632dbSHeiko Carstens 	default:
780367e1319SAvi Kivity 		r = -ENOTTY;
781b0c632dbSHeiko Carstens 	}
782b0c632dbSHeiko Carstens 
783b0c632dbSHeiko Carstens 	return r;
784b0c632dbSHeiko Carstens }
785b0c632dbSHeiko Carstens 
78645c9b47cSTony Krowiak static int kvm_s390_query_ap_config(u8 *config)
78745c9b47cSTony Krowiak {
78845c9b47cSTony Krowiak 	u32 fcn_code = 0x04000000UL;
78986044c8cSChristian Borntraeger 	u32 cc = 0;
79045c9b47cSTony Krowiak 
79186044c8cSChristian Borntraeger 	memset(config, 0, 128);
79245c9b47cSTony Krowiak 	asm volatile(
79345c9b47cSTony Krowiak 		"lgr 0,%1\n"
79445c9b47cSTony Krowiak 		"lgr 2,%2\n"
79545c9b47cSTony Krowiak 		".long 0xb2af0000\n"		/* PQAP(QCI) */
79686044c8cSChristian Borntraeger 		"0: ipm %0\n"
79745c9b47cSTony Krowiak 		"srl %0,28\n"
79886044c8cSChristian Borntraeger 		"1:\n"
79986044c8cSChristian Borntraeger 		EX_TABLE(0b, 1b)
80086044c8cSChristian Borntraeger 		: "+r" (cc)
80145c9b47cSTony Krowiak 		: "r" (fcn_code), "r" (config)
80245c9b47cSTony Krowiak 		: "cc", "0", "2", "memory"
80345c9b47cSTony Krowiak 	);
80445c9b47cSTony Krowiak 
80545c9b47cSTony Krowiak 	return cc;
80645c9b47cSTony Krowiak }
80745c9b47cSTony Krowiak 
80845c9b47cSTony Krowiak static int kvm_s390_apxa_installed(void)
80945c9b47cSTony Krowiak {
81045c9b47cSTony Krowiak 	u8 config[128];
81145c9b47cSTony Krowiak 	int cc;
81245c9b47cSTony Krowiak 
81345c9b47cSTony Krowiak 	if (test_facility(2) && test_facility(12)) {
81445c9b47cSTony Krowiak 		cc = kvm_s390_query_ap_config(config);
81545c9b47cSTony Krowiak 
81645c9b47cSTony Krowiak 		if (cc)
81745c9b47cSTony Krowiak 			pr_err("PQAP(QCI) failed with cc=%d", cc);
81845c9b47cSTony Krowiak 		else
81945c9b47cSTony Krowiak 			return config[0] & 0x40;
82045c9b47cSTony Krowiak 	}
82145c9b47cSTony Krowiak 
82245c9b47cSTony Krowiak 	return 0;
82345c9b47cSTony Krowiak }
82445c9b47cSTony Krowiak 
82545c9b47cSTony Krowiak static void kvm_s390_set_crycb_format(struct kvm *kvm)
82645c9b47cSTony Krowiak {
82745c9b47cSTony Krowiak 	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
82845c9b47cSTony Krowiak 
82945c9b47cSTony Krowiak 	if (kvm_s390_apxa_installed())
83045c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
83145c9b47cSTony Krowiak 	else
83245c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
83345c9b47cSTony Krowiak }
83445c9b47cSTony Krowiak 
8359d8d5786SMichael Mueller static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
8369d8d5786SMichael Mueller {
8379d8d5786SMichael Mueller 	get_cpu_id(cpu_id);
8389d8d5786SMichael Mueller 	cpu_id->version = 0xff;
8399d8d5786SMichael Mueller }
8409d8d5786SMichael Mueller 
8415102ee87STony Krowiak static int kvm_s390_crypto_init(struct kvm *kvm)
8425102ee87STony Krowiak {
8439d8d5786SMichael Mueller 	if (!test_kvm_facility(kvm, 76))
8445102ee87STony Krowiak 		return 0;
8455102ee87STony Krowiak 
8465102ee87STony Krowiak 	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
8475102ee87STony Krowiak 					 GFP_KERNEL | GFP_DMA);
8485102ee87STony Krowiak 	if (!kvm->arch.crypto.crycb)
8495102ee87STony Krowiak 		return -ENOMEM;
8505102ee87STony Krowiak 
85145c9b47cSTony Krowiak 	kvm_s390_set_crycb_format(kvm);
8525102ee87STony Krowiak 
853ed6f76b4STony Krowiak 	/* Enable AES/DEA protected key functions by default */
854ed6f76b4STony Krowiak 	kvm->arch.crypto.aes_kw = 1;
855ed6f76b4STony Krowiak 	kvm->arch.crypto.dea_kw = 1;
856ed6f76b4STony Krowiak 	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
857ed6f76b4STony Krowiak 			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
858ed6f76b4STony Krowiak 	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
859ed6f76b4STony Krowiak 			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
860a374e892STony Krowiak 
8615102ee87STony Krowiak 	return 0;
8625102ee87STony Krowiak }
8635102ee87STony Krowiak 
864e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
865b0c632dbSHeiko Carstens {
8669d8d5786SMichael Mueller 	int i, rc;
867b0c632dbSHeiko Carstens 	char debug_name[16];
868f6c137ffSChristian Borntraeger 	static unsigned long sca_offset;
869b0c632dbSHeiko Carstens 
870e08b9637SCarsten Otte 	rc = -EINVAL;
871e08b9637SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
872e08b9637SCarsten Otte 	if (type & ~KVM_VM_S390_UCONTROL)
873e08b9637SCarsten Otte 		goto out_err;
874e08b9637SCarsten Otte 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
875e08b9637SCarsten Otte 		goto out_err;
876e08b9637SCarsten Otte #else
877e08b9637SCarsten Otte 	if (type)
878e08b9637SCarsten Otte 		goto out_err;
879e08b9637SCarsten Otte #endif
880e08b9637SCarsten Otte 
881b0c632dbSHeiko Carstens 	rc = s390_enable_sie();
882b0c632dbSHeiko Carstens 	if (rc)
883d89f5effSJan Kiszka 		goto out_err;
884b0c632dbSHeiko Carstens 
885b290411aSCarsten Otte 	rc = -ENOMEM;
886b290411aSCarsten Otte 
887b0c632dbSHeiko Carstens 	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
888b0c632dbSHeiko Carstens 	if (!kvm->arch.sca)
889d89f5effSJan Kiszka 		goto out_err;
890f6c137ffSChristian Borntraeger 	spin_lock(&kvm_lock);
891f6c137ffSChristian Borntraeger 	sca_offset = (sca_offset + 16) & 0x7f0;
892f6c137ffSChristian Borntraeger 	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
893f6c137ffSChristian Borntraeger 	spin_unlock(&kvm_lock);
894b0c632dbSHeiko Carstens 
895b0c632dbSHeiko Carstens 	sprintf(debug_name, "kvm-%u", current->pid);
896b0c632dbSHeiko Carstens 
897b0c632dbSHeiko Carstens 	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
898b0c632dbSHeiko Carstens 	if (!kvm->arch.dbf)
899b0c632dbSHeiko Carstens 		goto out_nodbf;
900b0c632dbSHeiko Carstens 
9019d8d5786SMichael Mueller 	/*
9029d8d5786SMichael Mueller 	 * The architectural maximum amount of facilities is 16 kbit. To store
9039d8d5786SMichael Mueller 	 * this amount, 2 kbyte of memory is required. Thus we need a full
904981467c9SMichael Mueller 	 * page to hold the guest facility list (arch.model.fac->list) and the
905981467c9SMichael Mueller 	 * facility mask (arch.model.fac->mask). Its address size has to be
9069d8d5786SMichael Mueller 	 * 31 bits and word aligned.
9079d8d5786SMichael Mueller 	 */
9089d8d5786SMichael Mueller 	kvm->arch.model.fac =
909981467c9SMichael Mueller 		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
9109d8d5786SMichael Mueller 	if (!kvm->arch.model.fac)
9119d8d5786SMichael Mueller 		goto out_nofac;
9129d8d5786SMichael Mueller 
913fb5bf93fSMichael Mueller 	/* Populate the facility mask initially. */
914981467c9SMichael Mueller 	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
91594422ee8SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
9169d8d5786SMichael Mueller 	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
9179d8d5786SMichael Mueller 		if (i < kvm_s390_fac_list_mask_size())
918981467c9SMichael Mueller 			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
9199d8d5786SMichael Mueller 		else
920981467c9SMichael Mueller 			kvm->arch.model.fac->mask[i] = 0UL;
9219d8d5786SMichael Mueller 	}
9229d8d5786SMichael Mueller 
923981467c9SMichael Mueller 	/* Populate the facility list initially. */
924981467c9SMichael Mueller 	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
925981467c9SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
926981467c9SMichael Mueller 
9279d8d5786SMichael Mueller 	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
928658b6edaSMichael Mueller 	kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;
9299d8d5786SMichael Mueller 
9305102ee87STony Krowiak 	if (kvm_s390_crypto_init(kvm) < 0)
9315102ee87STony Krowiak 		goto out_crypto;
9325102ee87STony Krowiak 
933ba5c1e9bSCarsten Otte 	spin_lock_init(&kvm->arch.float_int.lock);
934ba5c1e9bSCarsten Otte 	INIT_LIST_HEAD(&kvm->arch.float_int.list);
9358a242234SHeiko Carstens 	init_waitqueue_head(&kvm->arch.ipte_wq);
936a6b7e459SThomas Huth 	mutex_init(&kvm->arch.ipte_mutex);
937ba5c1e9bSCarsten Otte 
938b0c632dbSHeiko Carstens 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
939b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "%s", "vm created");
940b0c632dbSHeiko Carstens 
941e08b9637SCarsten Otte 	if (type & KVM_VM_S390_UCONTROL) {
942e08b9637SCarsten Otte 		kvm->arch.gmap = NULL;
943e08b9637SCarsten Otte 	} else {
9440349985aSChristian Borntraeger 		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
945598841caSCarsten Otte 		if (!kvm->arch.gmap)
946598841caSCarsten Otte 			goto out_nogmap;
9472c70fe44SChristian Borntraeger 		kvm->arch.gmap->private = kvm;
94824eb3a82SDominik Dingel 		kvm->arch.gmap->pfault_enabled = 0;
949e08b9637SCarsten Otte 	}
950fa6b7fe9SCornelia Huck 
951fa6b7fe9SCornelia Huck 	kvm->arch.css_support = 0;
95284223598SCornelia Huck 	kvm->arch.use_irqchip = 0;
95368c55750SEric Farman 	kvm->arch.use_vectors = 0;
95472f25020SJason J. Herne 	kvm->arch.epoch = 0;
955fa6b7fe9SCornelia Huck 
9568ad35755SDavid Hildenbrand 	spin_lock_init(&kvm->arch.start_stop_lock);
9578ad35755SDavid Hildenbrand 
958d89f5effSJan Kiszka 	return 0;
959598841caSCarsten Otte out_nogmap:
9605102ee87STony Krowiak 	kfree(kvm->arch.crypto.crycb);
9615102ee87STony Krowiak out_crypto:
9629d8d5786SMichael Mueller 	free_page((unsigned long)kvm->arch.model.fac);
9639d8d5786SMichael Mueller out_nofac:
964598841caSCarsten Otte 	debug_unregister(kvm->arch.dbf);
965b0c632dbSHeiko Carstens out_nodbf:
966b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
967d89f5effSJan Kiszka out_err:
968d89f5effSJan Kiszka 	return rc;
969b0c632dbSHeiko Carstens }
970b0c632dbSHeiko Carstens 
971d329c035SChristian Borntraeger void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
972d329c035SChristian Borntraeger {
973d329c035SChristian Borntraeger 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
974ade38c31SCornelia Huck 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
97567335e63SChristian Borntraeger 	kvm_s390_clear_local_irqs(vcpu);
9763c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
97758f9460bSCarsten Otte 	if (!kvm_is_ucontrol(vcpu->kvm)) {
97858f9460bSCarsten Otte 		clear_bit(63 - vcpu->vcpu_id,
97958f9460bSCarsten Otte 			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
980abf4a71eSCarsten Otte 		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
981abf4a71eSCarsten Otte 		    (__u64) vcpu->arch.sie_block)
982abf4a71eSCarsten Otte 			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
98358f9460bSCarsten Otte 	}
984abf4a71eSCarsten Otte 	smp_mb();
98527e0393fSCarsten Otte 
98627e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm))
98727e0393fSCarsten Otte 		gmap_free(vcpu->arch.gmap);
98827e0393fSCarsten Otte 
989b31605c1SDominik Dingel 	if (kvm_s390_cmma_enabled(vcpu->kvm))
990b31605c1SDominik Dingel 		kvm_s390_vcpu_unsetup_cmma(vcpu);
991d329c035SChristian Borntraeger 	free_page((unsigned long)(vcpu->arch.sie_block));
992b31288faSKonstantin Weitz 
9936692cef3SChristian Borntraeger 	kvm_vcpu_uninit(vcpu);
994b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
995d329c035SChristian Borntraeger }
996d329c035SChristian Borntraeger 
997d329c035SChristian Borntraeger static void kvm_free_vcpus(struct kvm *kvm)
998d329c035SChristian Borntraeger {
999d329c035SChristian Borntraeger 	unsigned int i;
1000988a2caeSGleb Natapov 	struct kvm_vcpu *vcpu;
1001d329c035SChristian Borntraeger 
1002988a2caeSGleb Natapov 	kvm_for_each_vcpu(i, vcpu, kvm)
1003988a2caeSGleb Natapov 		kvm_arch_vcpu_destroy(vcpu);
1004988a2caeSGleb Natapov 
1005988a2caeSGleb Natapov 	mutex_lock(&kvm->lock);
1006988a2caeSGleb Natapov 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
1007d329c035SChristian Borntraeger 		kvm->vcpus[i] = NULL;
1008988a2caeSGleb Natapov 
1009988a2caeSGleb Natapov 	atomic_set(&kvm->online_vcpus, 0);
1010988a2caeSGleb Natapov 	mutex_unlock(&kvm->lock);
1011d329c035SChristian Borntraeger }
1012d329c035SChristian Borntraeger 
1013b0c632dbSHeiko Carstens void kvm_arch_destroy_vm(struct kvm *kvm)
1014b0c632dbSHeiko Carstens {
1015d329c035SChristian Borntraeger 	kvm_free_vcpus(kvm);
10169d8d5786SMichael Mueller 	free_page((unsigned long)kvm->arch.model.fac);
1017b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
1018d329c035SChristian Borntraeger 	debug_unregister(kvm->arch.dbf);
10195102ee87STony Krowiak 	kfree(kvm->arch.crypto.crycb);
102027e0393fSCarsten Otte 	if (!kvm_is_ucontrol(kvm))
1021598841caSCarsten Otte 		gmap_free(kvm->arch.gmap);
1022841b91c5SCornelia Huck 	kvm_s390_destroy_adapters(kvm);
102367335e63SChristian Borntraeger 	kvm_s390_clear_float_irqs(kvm);
1024b0c632dbSHeiko Carstens }
1025b0c632dbSHeiko Carstens 
1026b0c632dbSHeiko Carstens /* Section: vcpu related */
1027dafd032aSDominik Dingel static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1028b0c632dbSHeiko Carstens {
1029c6c956b8SMartin Schwidefsky 	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
103027e0393fSCarsten Otte 	if (!vcpu->arch.gmap)
103127e0393fSCarsten Otte 		return -ENOMEM;
10322c70fe44SChristian Borntraeger 	vcpu->arch.gmap->private = vcpu->kvm;
1033dafd032aSDominik Dingel 
103427e0393fSCarsten Otte 	return 0;
103527e0393fSCarsten Otte }
103627e0393fSCarsten Otte 
1037dafd032aSDominik Dingel int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1038dafd032aSDominik Dingel {
1039dafd032aSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1040dafd032aSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
104159674c1aSChristian Borntraeger 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
104259674c1aSChristian Borntraeger 				    KVM_SYNC_GPRS |
10439eed0735SChristian Borntraeger 				    KVM_SYNC_ACRS |
1044b028ee3eSDavid Hildenbrand 				    KVM_SYNC_CRS |
1045b028ee3eSDavid Hildenbrand 				    KVM_SYNC_ARCH0 |
1046b028ee3eSDavid Hildenbrand 				    KVM_SYNC_PFAULT;
104768c55750SEric Farman 	if (test_kvm_facility(vcpu->kvm, 129))
104868c55750SEric Farman 		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
1049dafd032aSDominik Dingel 
1050dafd032aSDominik Dingel 	if (kvm_is_ucontrol(vcpu->kvm))
1051dafd032aSDominik Dingel 		return __kvm_ucontrol_vcpu_init(vcpu);
1052dafd032aSDominik Dingel 
1053b0c632dbSHeiko Carstens 	return 0;
1054b0c632dbSHeiko Carstens }
1055b0c632dbSHeiko Carstens 
1056b0c632dbSHeiko Carstens void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1057b0c632dbSHeiko Carstens {
10584725c860SMartin Schwidefsky 	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
105968c55750SEric Farman 	if (vcpu->kvm->arch.use_vectors)
106068c55750SEric Farman 		save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
106168c55750SEric Farman 	else
10624725c860SMartin Schwidefsky 		save_fp_regs(vcpu->arch.host_fpregs.fprs);
1063b0c632dbSHeiko Carstens 	save_access_regs(vcpu->arch.host_acrs);
106468c55750SEric Farman 	if (vcpu->kvm->arch.use_vectors) {
106568c55750SEric Farman 		restore_fp_ctl(&vcpu->run->s.regs.fpc);
106668c55750SEric Farman 		restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
106768c55750SEric Farman 	} else {
10684725c860SMartin Schwidefsky 		restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
10694725c860SMartin Schwidefsky 		restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
107068c55750SEric Farman 	}
107159674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
1072480e5926SChristian Borntraeger 	gmap_enable(vcpu->arch.gmap);
10739e6dabefSCornelia Huck 	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
1074b0c632dbSHeiko Carstens }
1075b0c632dbSHeiko Carstens 
1076b0c632dbSHeiko Carstens void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1077b0c632dbSHeiko Carstens {
10789e6dabefSCornelia Huck 	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
1079480e5926SChristian Borntraeger 	gmap_disable(vcpu->arch.gmap);
108068c55750SEric Farman 	if (vcpu->kvm->arch.use_vectors) {
108168c55750SEric Farman 		save_fp_ctl(&vcpu->run->s.regs.fpc);
108268c55750SEric Farman 		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
108368c55750SEric Farman 	} else {
10844725c860SMartin Schwidefsky 		save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
10854725c860SMartin Schwidefsky 		save_fp_regs(vcpu->arch.guest_fpregs.fprs);
108668c55750SEric Farman 	}
108759674c1aSChristian Borntraeger 	save_access_regs(vcpu->run->s.regs.acrs);
10884725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
108968c55750SEric Farman 	if (vcpu->kvm->arch.use_vectors)
109068c55750SEric Farman 		restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
109168c55750SEric Farman 	else
10924725c860SMartin Schwidefsky 		restore_fp_regs(vcpu->arch.host_fpregs.fprs);
1093b0c632dbSHeiko Carstens 	restore_access_regs(vcpu->arch.host_acrs);
1094b0c632dbSHeiko Carstens }
1095b0c632dbSHeiko Carstens 
1096b0c632dbSHeiko Carstens static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
1097b0c632dbSHeiko Carstens {
1098b0c632dbSHeiko Carstens 	/* this equals initial cpu reset in pop, but we don't switch to ESA */
1099b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.mask = 0UL;
1100b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.addr = 0UL;
11018d26cf7bSChristian Borntraeger 	kvm_s390_set_prefix(vcpu, 0);
1102b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->cputm     = 0UL;
1103b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->ckc       = 0UL;
1104b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->todpr     = 0;
1105b0c632dbSHeiko Carstens 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
1106b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
1107b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
1108b0c632dbSHeiko Carstens 	vcpu->arch.guest_fpregs.fpc = 0;
1109b0c632dbSHeiko Carstens 	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
1110b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gbea = 1;
1111672550fbSChristian Borntraeger 	vcpu->arch.sie_block->pp = 0;
11123c038e6bSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
11133c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
11146352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
11156852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
11162ed10cc1SJens Freimann 	kvm_s390_clear_local_irqs(vcpu);
1117b0c632dbSHeiko Carstens }
1118b0c632dbSHeiko Carstens 
111931928aa5SDominik Dingel void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
112042897d86SMarcelo Tosatti {
112172f25020SJason J. Herne 	mutex_lock(&vcpu->kvm->lock);
112272f25020SJason J. Herne 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
112372f25020SJason J. Herne 	mutex_unlock(&vcpu->kvm->lock);
1124dafd032aSDominik Dingel 	if (!kvm_is_ucontrol(vcpu->kvm))
1125dafd032aSDominik Dingel 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
112642897d86SMarcelo Tosatti }
112742897d86SMarcelo Tosatti 
11285102ee87STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
11295102ee87STony Krowiak {
11309d8d5786SMichael Mueller 	if (!test_kvm_facility(vcpu->kvm, 76))
11315102ee87STony Krowiak 		return;
11325102ee87STony Krowiak 
1133a374e892STony Krowiak 	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1134a374e892STony Krowiak 
1135a374e892STony Krowiak 	if (vcpu->kvm->arch.crypto.aes_kw)
1136a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1137a374e892STony Krowiak 	if (vcpu->kvm->arch.crypto.dea_kw)
1138a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1139a374e892STony Krowiak 
11405102ee87STony Krowiak 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
11415102ee87STony Krowiak }
11425102ee87STony Krowiak 
1143b31605c1SDominik Dingel void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1144b31605c1SDominik Dingel {
1145b31605c1SDominik Dingel 	free_page(vcpu->arch.sie_block->cbrlo);
1146b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = 0;
1147b31605c1SDominik Dingel }
1148b31605c1SDominik Dingel 
1149b31605c1SDominik Dingel int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1150b31605c1SDominik Dingel {
1151b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1152b31605c1SDominik Dingel 	if (!vcpu->arch.sie_block->cbrlo)
1153b31605c1SDominik Dingel 		return -ENOMEM;
1154b31605c1SDominik Dingel 
1155b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 |= 0x80;
1156b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 &= ~0x08;
1157b31605c1SDominik Dingel 	return 0;
1158b31605c1SDominik Dingel }
1159b31605c1SDominik Dingel 
116091520f1aSMichael Mueller static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
116191520f1aSMichael Mueller {
116291520f1aSMichael Mueller 	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
116391520f1aSMichael Mueller 
116491520f1aSMichael Mueller 	vcpu->arch.cpu_id = model->cpu_id;
116591520f1aSMichael Mueller 	vcpu->arch.sie_block->ibc = model->ibc;
116691520f1aSMichael Mueller 	vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
116791520f1aSMichael Mueller }
116891520f1aSMichael Mueller 
1169b0c632dbSHeiko Carstens int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1170b0c632dbSHeiko Carstens {
1171b31605c1SDominik Dingel 	int rc = 0;
1172b31288faSKonstantin Weitz 
11739e6dabefSCornelia Huck 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
11749e6dabefSCornelia Huck 						    CPUSTAT_SM |
117569d0d3a3SChristian Borntraeger 						    CPUSTAT_STOPPED |
117669d0d3a3SChristian Borntraeger 						    CPUSTAT_GED);
117791520f1aSMichael Mueller 	kvm_s390_vcpu_setup_model(vcpu);
117891520f1aSMichael Mueller 
1179fc34531dSChristian Borntraeger 	vcpu->arch.sie_block->ecb   = 6;
11809d8d5786SMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
11817feb6bb8SMichael Mueller 		vcpu->arch.sie_block->ecb |= 0x10;
11827feb6bb8SMichael Mueller 
118369d0d3a3SChristian Borntraeger 	vcpu->arch.sie_block->ecb2  = 8;
1184ea5f4969SDavid Hildenbrand 	vcpu->arch.sie_block->eca   = 0xC1002000U;
1185217a4406SHeiko Carstens 	if (sclp_has_siif())
1186217a4406SHeiko Carstens 		vcpu->arch.sie_block->eca |= 1;
1187ea5f4969SDavid Hildenbrand 	if (sclp_has_sigpif())
1188ea5f4969SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= 0x10000000U;
1189492d8642SThomas Huth 	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
11905a5e6536SMatthew Rosato 
1191b31605c1SDominik Dingel 	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
1192b31605c1SDominik Dingel 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
1193b31605c1SDominik Dingel 		if (rc)
1194b31605c1SDominik Dingel 			return rc;
1195b31288faSKonstantin Weitz 	}
11960ac96cafSDavid Hildenbrand 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1197ca872302SChristian Borntraeger 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
11989d8d5786SMichael Mueller 
11995102ee87STony Krowiak 	kvm_s390_vcpu_crypto_setup(vcpu);
12005102ee87STony Krowiak 
1201b31605c1SDominik Dingel 	return rc;
1202b0c632dbSHeiko Carstens }
1203b0c632dbSHeiko Carstens 
1204b0c632dbSHeiko Carstens struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1205b0c632dbSHeiko Carstens 				      unsigned int id)
1206b0c632dbSHeiko Carstens {
12074d47555aSCarsten Otte 	struct kvm_vcpu *vcpu;
12087feb6bb8SMichael Mueller 	struct sie_page *sie_page;
12094d47555aSCarsten Otte 	int rc = -EINVAL;
1210b0c632dbSHeiko Carstens 
12114d47555aSCarsten Otte 	if (id >= KVM_MAX_VCPUS)
12124d47555aSCarsten Otte 		goto out;
12134d47555aSCarsten Otte 
12144d47555aSCarsten Otte 	rc = -ENOMEM;
12154d47555aSCarsten Otte 
1216b110feafSMichael Mueller 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
1217b0c632dbSHeiko Carstens 	if (!vcpu)
12184d47555aSCarsten Otte 		goto out;
1219b0c632dbSHeiko Carstens 
12207feb6bb8SMichael Mueller 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
12217feb6bb8SMichael Mueller 	if (!sie_page)
1222b0c632dbSHeiko Carstens 		goto out_free_cpu;
1223b0c632dbSHeiko Carstens 
12247feb6bb8SMichael Mueller 	vcpu->arch.sie_block = &sie_page->sie_block;
12257feb6bb8SMichael Mueller 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
122668c55750SEric Farman 	vcpu->arch.host_vregs = &sie_page->vregs;
12277feb6bb8SMichael Mueller 
1228b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icpua = id;
122958f9460bSCarsten Otte 	if (!kvm_is_ucontrol(kvm)) {
123058f9460bSCarsten Otte 		if (!kvm->arch.sca) {
123158f9460bSCarsten Otte 			WARN_ON_ONCE(1);
123258f9460bSCarsten Otte 			goto out_free_cpu;
123358f9460bSCarsten Otte 		}
1234abf4a71eSCarsten Otte 		if (!kvm->arch.sca->cpu[id].sda)
123558f9460bSCarsten Otte 			kvm->arch.sca->cpu[id].sda =
123658f9460bSCarsten Otte 				(__u64) vcpu->arch.sie_block;
123758f9460bSCarsten Otte 		vcpu->arch.sie_block->scaoh =
123858f9460bSCarsten Otte 			(__u32)(((__u64)kvm->arch.sca) >> 32);
1239b0c632dbSHeiko Carstens 		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
1240fc34531dSChristian Borntraeger 		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
124158f9460bSCarsten Otte 	}
1242b0c632dbSHeiko Carstens 
1243ba5c1e9bSCarsten Otte 	spin_lock_init(&vcpu->arch.local_int.lock);
1244ba5c1e9bSCarsten Otte 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
1245d0321a24SChristian Borntraeger 	vcpu->arch.local_int.wq = &vcpu->wq;
12465288fbf0SChristian Borntraeger 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
1247ba5c1e9bSCarsten Otte 
1248b0c632dbSHeiko Carstens 	rc = kvm_vcpu_init(vcpu, kvm, id);
1249b0c632dbSHeiko Carstens 	if (rc)
12507b06bf2fSWei Yongjun 		goto out_free_sie_block;
1251b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
1252b0c632dbSHeiko Carstens 		 vcpu->arch.sie_block);
1253ade38c31SCornelia Huck 	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
1254b0c632dbSHeiko Carstens 
1255b0c632dbSHeiko Carstens 	return vcpu;
12567b06bf2fSWei Yongjun out_free_sie_block:
12577b06bf2fSWei Yongjun 	free_page((unsigned long)(vcpu->arch.sie_block));
1258b0c632dbSHeiko Carstens out_free_cpu:
1259b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
12604d47555aSCarsten Otte out:
1261b0c632dbSHeiko Carstens 	return ERR_PTR(rc);
1262b0c632dbSHeiko Carstens }
1263b0c632dbSHeiko Carstens 
1264b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1265b0c632dbSHeiko Carstens {
12669a022067SDavid Hildenbrand 	return kvm_s390_vcpu_has_irq(vcpu, 0);
1267b0c632dbSHeiko Carstens }
1268b0c632dbSHeiko Carstens 
126949b99e1eSChristian Borntraeger void s390_vcpu_block(struct kvm_vcpu *vcpu)
127049b99e1eSChristian Borntraeger {
127149b99e1eSChristian Borntraeger 	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
127249b99e1eSChristian Borntraeger }
127349b99e1eSChristian Borntraeger 
127449b99e1eSChristian Borntraeger void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
127549b99e1eSChristian Borntraeger {
127649b99e1eSChristian Borntraeger 	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
127749b99e1eSChristian Borntraeger }
127849b99e1eSChristian Borntraeger 
127949b99e1eSChristian Borntraeger /*
128049b99e1eSChristian Borntraeger  * Kick a guest cpu out of SIE and wait until SIE is not running.
128149b99e1eSChristian Borntraeger  * If the CPU is not running (e.g. waiting as idle) the function will
128249b99e1eSChristian Borntraeger  * return immediately. */
128349b99e1eSChristian Borntraeger void exit_sie(struct kvm_vcpu *vcpu)
128449b99e1eSChristian Borntraeger {
128549b99e1eSChristian Borntraeger 	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
128649b99e1eSChristian Borntraeger 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
128749b99e1eSChristian Borntraeger 		cpu_relax();
128849b99e1eSChristian Borntraeger }
128949b99e1eSChristian Borntraeger 
129049b99e1eSChristian Borntraeger /* Kick a guest cpu out of SIE and prevent SIE-reentry */
129149b99e1eSChristian Borntraeger void exit_sie_sync(struct kvm_vcpu *vcpu)
129249b99e1eSChristian Borntraeger {
129349b99e1eSChristian Borntraeger 	s390_vcpu_block(vcpu);
129449b99e1eSChristian Borntraeger 	exit_sie(vcpu);
129549b99e1eSChristian Borntraeger }
129649b99e1eSChristian Borntraeger 
12972c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
12982c70fe44SChristian Borntraeger {
12992c70fe44SChristian Borntraeger 	int i;
13002c70fe44SChristian Borntraeger 	struct kvm *kvm = gmap->private;
13012c70fe44SChristian Borntraeger 	struct kvm_vcpu *vcpu;
13022c70fe44SChristian Borntraeger 
13032c70fe44SChristian Borntraeger 	kvm_for_each_vcpu(i, vcpu, kvm) {
13042c70fe44SChristian Borntraeger 		/* match against both prefix pages */
1305fda902cbSMichael Mueller 		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
13062c70fe44SChristian Borntraeger 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
13072c70fe44SChristian Borntraeger 			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
13082c70fe44SChristian Borntraeger 			exit_sie_sync(vcpu);
13092c70fe44SChristian Borntraeger 		}
13102c70fe44SChristian Borntraeger 	}
13112c70fe44SChristian Borntraeger }
13122c70fe44SChristian Borntraeger 
1313b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1314b6d33834SChristoffer Dall {
1315b6d33834SChristoffer Dall 	/* kvm common code refers to this, but never calls it */
1316b6d33834SChristoffer Dall 	BUG();
1317b6d33834SChristoffer Dall 	return 0;
1318b6d33834SChristoffer Dall }
1319b6d33834SChristoffer Dall 
132014eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
132114eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
132214eebd91SCarsten Otte {
132314eebd91SCarsten Otte 	int r = -EINVAL;
132414eebd91SCarsten Otte 
132514eebd91SCarsten Otte 	switch (reg->id) {
132629b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
132729b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->todpr,
132829b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
132929b7c71bSCarsten Otte 		break;
133029b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
133129b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->epoch,
133229b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
133329b7c71bSCarsten Otte 		break;
133446a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
133546a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->cputm,
133646a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
133746a6dd1cSJason J. herne 		break;
133846a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
133946a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->ckc,
134046a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
134146a6dd1cSJason J. herne 		break;
1342536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
1343536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_token,
1344536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1345536336c2SDominik Dingel 		break;
1346536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
1347536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_compare,
1348536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1349536336c2SDominik Dingel 		break;
1350536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
1351536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_select,
1352536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1353536336c2SDominik Dingel 		break;
1354672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
1355672550fbSChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->pp,
1356672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
1357672550fbSChristian Borntraeger 		break;
1358afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
1359afa45ff5SChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->gbea,
1360afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
1361afa45ff5SChristian Borntraeger 		break;
136214eebd91SCarsten Otte 	default:
136314eebd91SCarsten Otte 		break;
136414eebd91SCarsten Otte 	}
136514eebd91SCarsten Otte 
136614eebd91SCarsten Otte 	return r;
136714eebd91SCarsten Otte }
136814eebd91SCarsten Otte 
136914eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
137014eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
137114eebd91SCarsten Otte {
137214eebd91SCarsten Otte 	int r = -EINVAL;
137314eebd91SCarsten Otte 
137414eebd91SCarsten Otte 	switch (reg->id) {
137529b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
137629b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->todpr,
137729b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
137829b7c71bSCarsten Otte 		break;
137929b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
138029b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->epoch,
138129b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
138229b7c71bSCarsten Otte 		break;
138346a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
138446a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->cputm,
138546a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
138646a6dd1cSJason J. herne 		break;
138746a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
138846a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->ckc,
138946a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
139046a6dd1cSJason J. herne 		break;
1391536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
1392536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_token,
1393536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
13949fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
13959fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
1396536336c2SDominik Dingel 		break;
1397536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
1398536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_compare,
1399536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1400536336c2SDominik Dingel 		break;
1401536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
1402536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_select,
1403536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1404536336c2SDominik Dingel 		break;
1405672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
1406672550fbSChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->pp,
1407672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
1408672550fbSChristian Borntraeger 		break;
1409afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
1410afa45ff5SChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->gbea,
1411afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
1412afa45ff5SChristian Borntraeger 		break;
141314eebd91SCarsten Otte 	default:
141414eebd91SCarsten Otte 		break;
141514eebd91SCarsten Otte 	}
141614eebd91SCarsten Otte 
141714eebd91SCarsten Otte 	return r;
141814eebd91SCarsten Otte }
1419b6d33834SChristoffer Dall 
1420b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
1421b0c632dbSHeiko Carstens {
1422b0c632dbSHeiko Carstens 	kvm_s390_vcpu_initial_reset(vcpu);
1423b0c632dbSHeiko Carstens 	return 0;
1424b0c632dbSHeiko Carstens }
1425b0c632dbSHeiko Carstens 
1426b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1427b0c632dbSHeiko Carstens {
14285a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
1429b0c632dbSHeiko Carstens 	return 0;
1430b0c632dbSHeiko Carstens }
1431b0c632dbSHeiko Carstens 
1432b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1433b0c632dbSHeiko Carstens {
14345a32c1afSChristian Borntraeger 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
1435b0c632dbSHeiko Carstens 	return 0;
1436b0c632dbSHeiko Carstens }
1437b0c632dbSHeiko Carstens 
1438b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1439b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
1440b0c632dbSHeiko Carstens {
144159674c1aSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
1442b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
144359674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
1444b0c632dbSHeiko Carstens 	return 0;
1445b0c632dbSHeiko Carstens }
1446b0c632dbSHeiko Carstens 
1447b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1448b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
1449b0c632dbSHeiko Carstens {
145059674c1aSChristian Borntraeger 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
1451b0c632dbSHeiko Carstens 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
1452b0c632dbSHeiko Carstens 	return 0;
1453b0c632dbSHeiko Carstens }
1454b0c632dbSHeiko Carstens 
1455b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1456b0c632dbSHeiko Carstens {
14574725c860SMartin Schwidefsky 	if (test_fp_ctl(fpu->fpc))
14584725c860SMartin Schwidefsky 		return -EINVAL;
1459b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
14604725c860SMartin Schwidefsky 	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
14614725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
14624725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
1463b0c632dbSHeiko Carstens 	return 0;
1464b0c632dbSHeiko Carstens }
1465b0c632dbSHeiko Carstens 
1466b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1467b0c632dbSHeiko Carstens {
1468b0c632dbSHeiko Carstens 	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
1469b0c632dbSHeiko Carstens 	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
1470b0c632dbSHeiko Carstens 	return 0;
1471b0c632dbSHeiko Carstens }
1472b0c632dbSHeiko Carstens 
1473b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1474b0c632dbSHeiko Carstens {
1475b0c632dbSHeiko Carstens 	int rc = 0;
1476b0c632dbSHeiko Carstens 
14777a42fdc2SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
1478b0c632dbSHeiko Carstens 		rc = -EBUSY;
1479d7b0b5ebSCarsten Otte 	else {
1480d7b0b5ebSCarsten Otte 		vcpu->run->psw_mask = psw.mask;
1481d7b0b5ebSCarsten Otte 		vcpu->run->psw_addr = psw.addr;
1482d7b0b5ebSCarsten Otte 	}
1483b0c632dbSHeiko Carstens 	return rc;
1484b0c632dbSHeiko Carstens }
1485b0c632dbSHeiko Carstens 
1486b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1487b0c632dbSHeiko Carstens 				  struct kvm_translation *tr)
1488b0c632dbSHeiko Carstens {
1489b0c632dbSHeiko Carstens 	return -EINVAL; /* not implemented yet */
1490b0c632dbSHeiko Carstens }
1491b0c632dbSHeiko Carstens 
149227291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
149327291e21SDavid Hildenbrand 			      KVM_GUESTDBG_USE_HW_BP | \
149427291e21SDavid Hildenbrand 			      KVM_GUESTDBG_ENABLE)
149527291e21SDavid Hildenbrand 
1496d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1497d0bfb940SJan Kiszka 					struct kvm_guest_debug *dbg)
1498b0c632dbSHeiko Carstens {
149927291e21SDavid Hildenbrand 	int rc = 0;
150027291e21SDavid Hildenbrand 
150127291e21SDavid Hildenbrand 	vcpu->guest_debug = 0;
150227291e21SDavid Hildenbrand 	kvm_s390_clear_bp_data(vcpu);
150327291e21SDavid Hildenbrand 
15042de3bfc2SDavid Hildenbrand 	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
150527291e21SDavid Hildenbrand 		return -EINVAL;
150627291e21SDavid Hildenbrand 
150727291e21SDavid Hildenbrand 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
150827291e21SDavid Hildenbrand 		vcpu->guest_debug = dbg->control;
150927291e21SDavid Hildenbrand 		/* enforce guest PER */
151027291e21SDavid Hildenbrand 		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
151127291e21SDavid Hildenbrand 
151227291e21SDavid Hildenbrand 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
151327291e21SDavid Hildenbrand 			rc = kvm_s390_import_bp_data(vcpu, dbg);
151427291e21SDavid Hildenbrand 	} else {
151527291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
151627291e21SDavid Hildenbrand 		vcpu->arch.guestdbg.last_bp = 0;
151727291e21SDavid Hildenbrand 	}
151827291e21SDavid Hildenbrand 
151927291e21SDavid Hildenbrand 	if (rc) {
152027291e21SDavid Hildenbrand 		vcpu->guest_debug = 0;
152127291e21SDavid Hildenbrand 		kvm_s390_clear_bp_data(vcpu);
152227291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
152327291e21SDavid Hildenbrand 	}
152427291e21SDavid Hildenbrand 
152527291e21SDavid Hildenbrand 	return rc;
1526b0c632dbSHeiko Carstens }
1527b0c632dbSHeiko Carstens 
152862d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
152962d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
153062d9f0dbSMarcelo Tosatti {
15316352e4d2SDavid Hildenbrand 	/* CHECK_STOP and LOAD are not supported yet */
15326352e4d2SDavid Hildenbrand 	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
15336352e4d2SDavid Hildenbrand 				       KVM_MP_STATE_OPERATING;
153462d9f0dbSMarcelo Tosatti }
153562d9f0dbSMarcelo Tosatti 
153662d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
153762d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
153862d9f0dbSMarcelo Tosatti {
15396352e4d2SDavid Hildenbrand 	int rc = 0;
15406352e4d2SDavid Hildenbrand 
15416352e4d2SDavid Hildenbrand 	/* user space knows about this interface - let it control the state */
15426352e4d2SDavid Hildenbrand 	vcpu->kvm->arch.user_cpu_state_ctrl = 1;
15436352e4d2SDavid Hildenbrand 
15446352e4d2SDavid Hildenbrand 	switch (mp_state->mp_state) {
15456352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_STOPPED:
15466352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
15476352e4d2SDavid Hildenbrand 		break;
15486352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_OPERATING:
15496352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
15506352e4d2SDavid Hildenbrand 		break;
15516352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_LOAD:
15526352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_CHECK_STOP:
15536352e4d2SDavid Hildenbrand 		/* fall through - CHECK_STOP and LOAD are not supported yet */
15546352e4d2SDavid Hildenbrand 	default:
15556352e4d2SDavid Hildenbrand 		rc = -ENXIO;
15566352e4d2SDavid Hildenbrand 	}
15576352e4d2SDavid Hildenbrand 
15586352e4d2SDavid Hildenbrand 	return rc;
155962d9f0dbSMarcelo Tosatti }
156062d9f0dbSMarcelo Tosatti 
1561b31605c1SDominik Dingel bool kvm_s390_cmma_enabled(struct kvm *kvm)
1562b31605c1SDominik Dingel {
1563b31605c1SDominik Dingel 	if (!MACHINE_IS_LPAR)
1564b31605c1SDominik Dingel 		return false;
1565b31605c1SDominik Dingel 	/* only enable for z10 and later */
1566b31605c1SDominik Dingel 	if (!MACHINE_HAS_EDAT1)
1567b31605c1SDominik Dingel 		return false;
1568b31605c1SDominik Dingel 	if (!kvm->arch.use_cmma)
1569b31605c1SDominik Dingel 		return false;
1570b31605c1SDominik Dingel 	return true;
1571b31605c1SDominik Dingel }
1572b31605c1SDominik Dingel 
15738ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu)
15748ad35755SDavid Hildenbrand {
15758ad35755SDavid Hildenbrand 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
15768ad35755SDavid Hildenbrand }
15778ad35755SDavid Hildenbrand 
15782c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
15792c70fe44SChristian Borntraeger {
15808ad35755SDavid Hildenbrand retry:
15818ad35755SDavid Hildenbrand 	s390_vcpu_unblock(vcpu);
15822c70fe44SChristian Borntraeger 	/*
15832c70fe44SChristian Borntraeger 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
15842c70fe44SChristian Borntraeger 	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
15852c70fe44SChristian Borntraeger 	 * This ensures that the ipte instruction for this request has
15862c70fe44SChristian Borntraeger 	 * already finished. We might race against a second unmapper that
15872c70fe44SChristian Borntraeger 	 * wants to set the blocking bit. Lets just retry the request loop.
15882c70fe44SChristian Borntraeger 	 */
15898ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
15902c70fe44SChristian Borntraeger 		int rc;
15912c70fe44SChristian Borntraeger 		rc = gmap_ipte_notify(vcpu->arch.gmap,
1592fda902cbSMichael Mueller 				      kvm_s390_get_prefix(vcpu),
15932c70fe44SChristian Borntraeger 				      PAGE_SIZE * 2);
15942c70fe44SChristian Borntraeger 		if (rc)
15952c70fe44SChristian Borntraeger 			return rc;
15968ad35755SDavid Hildenbrand 		goto retry;
15972c70fe44SChristian Borntraeger 	}
15988ad35755SDavid Hildenbrand 
1599d3d692c8SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
1600d3d692c8SDavid Hildenbrand 		vcpu->arch.sie_block->ihcpu = 0xffff;
1601d3d692c8SDavid Hildenbrand 		goto retry;
1602d3d692c8SDavid Hildenbrand 	}
1603d3d692c8SDavid Hildenbrand 
16048ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
16058ad35755SDavid Hildenbrand 		if (!ibs_enabled(vcpu)) {
16068ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
16078ad35755SDavid Hildenbrand 			atomic_set_mask(CPUSTAT_IBS,
16088ad35755SDavid Hildenbrand 					&vcpu->arch.sie_block->cpuflags);
16098ad35755SDavid Hildenbrand 		}
16108ad35755SDavid Hildenbrand 		goto retry;
16118ad35755SDavid Hildenbrand 	}
16128ad35755SDavid Hildenbrand 
16138ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
16148ad35755SDavid Hildenbrand 		if (ibs_enabled(vcpu)) {
16158ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
16168ad35755SDavid Hildenbrand 			atomic_clear_mask(CPUSTAT_IBS,
16178ad35755SDavid Hildenbrand 					  &vcpu->arch.sie_block->cpuflags);
16188ad35755SDavid Hildenbrand 		}
16198ad35755SDavid Hildenbrand 		goto retry;
16208ad35755SDavid Hildenbrand 	}
16218ad35755SDavid Hildenbrand 
16220759d068SDavid Hildenbrand 	/* nothing to do, just clear the request */
16230759d068SDavid Hildenbrand 	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
16240759d068SDavid Hildenbrand 
16252c70fe44SChristian Borntraeger 	return 0;
16262c70fe44SChristian Borntraeger }
16272c70fe44SChristian Borntraeger 
1628fa576c58SThomas Huth /**
1629fa576c58SThomas Huth  * kvm_arch_fault_in_page - fault-in guest page if necessary
1630fa576c58SThomas Huth  * @vcpu: The corresponding virtual cpu
1631fa576c58SThomas Huth  * @gpa: Guest physical address
1632fa576c58SThomas Huth  * @writable: Whether the page should be writable or not
1633fa576c58SThomas Huth  *
1634fa576c58SThomas Huth  * Make sure that a guest page has been faulted-in on the host.
1635fa576c58SThomas Huth  *
1636fa576c58SThomas Huth  * Return: Zero on success, negative error code otherwise.
1637fa576c58SThomas Huth  */
1638fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
163924eb3a82SDominik Dingel {
1640527e30b4SMartin Schwidefsky 	return gmap_fault(vcpu->arch.gmap, gpa,
1641527e30b4SMartin Schwidefsky 			  writable ? FAULT_FLAG_WRITE : 0);
164224eb3a82SDominik Dingel }
164324eb3a82SDominik Dingel 
16443c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
16453c038e6bSDominik Dingel 				      unsigned long token)
16463c038e6bSDominik Dingel {
16473c038e6bSDominik Dingel 	struct kvm_s390_interrupt inti;
1648383d0b05SJens Freimann 	struct kvm_s390_irq irq;
16493c038e6bSDominik Dingel 
16503c038e6bSDominik Dingel 	if (start_token) {
1651383d0b05SJens Freimann 		irq.u.ext.ext_params2 = token;
1652383d0b05SJens Freimann 		irq.type = KVM_S390_INT_PFAULT_INIT;
1653383d0b05SJens Freimann 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
16543c038e6bSDominik Dingel 	} else {
16553c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_DONE;
1656383d0b05SJens Freimann 		inti.parm64 = token;
16573c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
16583c038e6bSDominik Dingel 	}
16593c038e6bSDominik Dingel }
16603c038e6bSDominik Dingel 
16613c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
16623c038e6bSDominik Dingel 				     struct kvm_async_pf *work)
16633c038e6bSDominik Dingel {
16643c038e6bSDominik Dingel 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
16653c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
16663c038e6bSDominik Dingel }
16673c038e6bSDominik Dingel 
16683c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
16693c038e6bSDominik Dingel 				 struct kvm_async_pf *work)
16703c038e6bSDominik Dingel {
16713c038e6bSDominik Dingel 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
16723c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
16733c038e6bSDominik Dingel }
16743c038e6bSDominik Dingel 
16753c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
16763c038e6bSDominik Dingel 			       struct kvm_async_pf *work)
16773c038e6bSDominik Dingel {
16783c038e6bSDominik Dingel 	/* s390 will always inject the page directly */
16793c038e6bSDominik Dingel }
16803c038e6bSDominik Dingel 
16813c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
16823c038e6bSDominik Dingel {
16833c038e6bSDominik Dingel 	/*
16843c038e6bSDominik Dingel 	 * s390 will always inject the page directly,
16853c038e6bSDominik Dingel 	 * but we still want check_async_completion to cleanup
16863c038e6bSDominik Dingel 	 */
16873c038e6bSDominik Dingel 	return true;
16883c038e6bSDominik Dingel }
16893c038e6bSDominik Dingel 
16903c038e6bSDominik Dingel static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
16913c038e6bSDominik Dingel {
16923c038e6bSDominik Dingel 	hva_t hva;
16933c038e6bSDominik Dingel 	struct kvm_arch_async_pf arch;
16943c038e6bSDominik Dingel 	int rc;
16953c038e6bSDominik Dingel 
16963c038e6bSDominik Dingel 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
16973c038e6bSDominik Dingel 		return 0;
16983c038e6bSDominik Dingel 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
16993c038e6bSDominik Dingel 	    vcpu->arch.pfault_compare)
17003c038e6bSDominik Dingel 		return 0;
17013c038e6bSDominik Dingel 	if (psw_extint_disabled(vcpu))
17023c038e6bSDominik Dingel 		return 0;
17039a022067SDavid Hildenbrand 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
17043c038e6bSDominik Dingel 		return 0;
17053c038e6bSDominik Dingel 	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
17063c038e6bSDominik Dingel 		return 0;
17073c038e6bSDominik Dingel 	if (!vcpu->arch.gmap->pfault_enabled)
17083c038e6bSDominik Dingel 		return 0;
17093c038e6bSDominik Dingel 
171081480cc1SHeiko Carstens 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
171181480cc1SHeiko Carstens 	hva += current->thread.gmap_addr & ~PAGE_MASK;
171281480cc1SHeiko Carstens 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
17133c038e6bSDominik Dingel 		return 0;
17143c038e6bSDominik Dingel 
17153c038e6bSDominik Dingel 	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
17163c038e6bSDominik Dingel 	return rc;
17173c038e6bSDominik Dingel }
17183c038e6bSDominik Dingel 
17193fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu)
1720b0c632dbSHeiko Carstens {
17213fb4c40fSThomas Huth 	int rc, cpuflags;
1722e168bf8dSCarsten Otte 
17233c038e6bSDominik Dingel 	/*
17243c038e6bSDominik Dingel 	 * On s390 notifications for arriving pages will be delivered directly
17253c038e6bSDominik Dingel 	 * to the guest but the house keeping for completed pfaults is
17263c038e6bSDominik Dingel 	 * handled outside the worker.
17273c038e6bSDominik Dingel 	 */
17283c038e6bSDominik Dingel 	kvm_check_async_pf_completion(vcpu);
17293c038e6bSDominik Dingel 
17305a32c1afSChristian Borntraeger 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
1731b0c632dbSHeiko Carstens 
1732b0c632dbSHeiko Carstens 	if (need_resched())
1733b0c632dbSHeiko Carstens 		schedule();
1734b0c632dbSHeiko Carstens 
1735d3a73acbSMartin Schwidefsky 	if (test_cpu_flag(CIF_MCCK_PENDING))
173671cde587SChristian Borntraeger 		s390_handle_mcck();
173771cde587SChristian Borntraeger 
173879395031SJens Freimann 	if (!kvm_is_ucontrol(vcpu->kvm)) {
173979395031SJens Freimann 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
174079395031SJens Freimann 		if (rc)
174179395031SJens Freimann 			return rc;
174279395031SJens Freimann 	}
17430ff31867SCarsten Otte 
17442c70fe44SChristian Borntraeger 	rc = kvm_s390_handle_requests(vcpu);
17452c70fe44SChristian Borntraeger 	if (rc)
17462c70fe44SChristian Borntraeger 		return rc;
17472c70fe44SChristian Borntraeger 
174827291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu)) {
174927291e21SDavid Hildenbrand 		kvm_s390_backup_guest_per_regs(vcpu);
175027291e21SDavid Hildenbrand 		kvm_s390_patch_guest_per_regs(vcpu);
175127291e21SDavid Hildenbrand 	}
175227291e21SDavid Hildenbrand 
1753b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icptcode = 0;
17543fb4c40fSThomas Huth 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
17553fb4c40fSThomas Huth 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
17563fb4c40fSThomas Huth 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
17572b29a9fdSDominik Dingel 
17583fb4c40fSThomas Huth 	return 0;
17593fb4c40fSThomas Huth }
17603fb4c40fSThomas Huth 
1761492d8642SThomas Huth static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
1762492d8642SThomas Huth {
1763492d8642SThomas Huth 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
1764492d8642SThomas Huth 	u8 opcode;
1765492d8642SThomas Huth 	int rc;
1766492d8642SThomas Huth 
1767492d8642SThomas Huth 	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
1768492d8642SThomas Huth 	trace_kvm_s390_sie_fault(vcpu);
1769492d8642SThomas Huth 
1770492d8642SThomas Huth 	/*
1771492d8642SThomas Huth 	 * We want to inject an addressing exception, which is defined as a
1772492d8642SThomas Huth 	 * suppressing or terminating exception. However, since we came here
1773492d8642SThomas Huth 	 * by a DAT access exception, the PSW still points to the faulting
1774492d8642SThomas Huth 	 * instruction since DAT exceptions are nullifying. So we've got
1775492d8642SThomas Huth 	 * to look up the current opcode to get the length of the instruction
1776492d8642SThomas Huth 	 * to be able to forward the PSW.
1777492d8642SThomas Huth 	 */
1778492d8642SThomas Huth 	rc = read_guest(vcpu, psw->addr, &opcode, 1);
1779492d8642SThomas Huth 	if (rc)
1780492d8642SThomas Huth 		return kvm_s390_inject_prog_cond(vcpu, rc);
1781492d8642SThomas Huth 	psw->addr = __rewind_psw(*psw, -insn_length(opcode));
1782492d8642SThomas Huth 
1783492d8642SThomas Huth 	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
1784492d8642SThomas Huth }
1785492d8642SThomas Huth 
17863fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
17873fb4c40fSThomas Huth {
178824eb3a82SDominik Dingel 	int rc = -1;
17892b29a9fdSDominik Dingel 
17902b29a9fdSDominik Dingel 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
17912b29a9fdSDominik Dingel 		   vcpu->arch.sie_block->icptcode);
17922b29a9fdSDominik Dingel 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
17932b29a9fdSDominik Dingel 
179427291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu))
179527291e21SDavid Hildenbrand 		kvm_s390_restore_guest_per_regs(vcpu);
179627291e21SDavid Hildenbrand 
17973fb4c40fSThomas Huth 	if (exit_reason >= 0) {
17987c470539SMartin Schwidefsky 		rc = 0;
1799210b1607SThomas Huth 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
1800210b1607SThomas Huth 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
1801210b1607SThomas Huth 		vcpu->run->s390_ucontrol.trans_exc_code =
1802210b1607SThomas Huth 						current->thread.gmap_addr;
1803210b1607SThomas Huth 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
1804210b1607SThomas Huth 		rc = -EREMOTE;
180524eb3a82SDominik Dingel 
180624eb3a82SDominik Dingel 	} else if (current->thread.gmap_pfault) {
18073c038e6bSDominik Dingel 		trace_kvm_s390_major_guest_pfault(vcpu);
180824eb3a82SDominik Dingel 		current->thread.gmap_pfault = 0;
1809fa576c58SThomas Huth 		if (kvm_arch_setup_async_pf(vcpu)) {
181024eb3a82SDominik Dingel 			rc = 0;
1811fa576c58SThomas Huth 		} else {
1812fa576c58SThomas Huth 			gpa_t gpa = current->thread.gmap_addr;
1813fa576c58SThomas Huth 			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
1814fa576c58SThomas Huth 		}
181524eb3a82SDominik Dingel 	}
181624eb3a82SDominik Dingel 
1817492d8642SThomas Huth 	if (rc == -1)
1818492d8642SThomas Huth 		rc = vcpu_post_run_fault_in_sie(vcpu);
1819b0c632dbSHeiko Carstens 
18205a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
18213fb4c40fSThomas Huth 
1822a76ccff6SThomas Huth 	if (rc == 0) {
1823a76ccff6SThomas Huth 		if (kvm_is_ucontrol(vcpu->kvm))
18242955c83fSChristian Borntraeger 			/* Don't exit for host interrupts. */
18252955c83fSChristian Borntraeger 			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
1826a76ccff6SThomas Huth 		else
1827a76ccff6SThomas Huth 			rc = kvm_handle_sie_intercept(vcpu);
1828a76ccff6SThomas Huth 	}
1829a76ccff6SThomas Huth 
18303fb4c40fSThomas Huth 	return rc;
18313fb4c40fSThomas Huth }
18323fb4c40fSThomas Huth 
18333fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu)
18343fb4c40fSThomas Huth {
18353fb4c40fSThomas Huth 	int rc, exit_reason;
18363fb4c40fSThomas Huth 
1837800c1065SThomas Huth 	/*
1838800c1065SThomas Huth 	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
1839800c1065SThomas Huth 	 * ning the guest), so that memslots (and other stuff) are protected
1840800c1065SThomas Huth 	 */
1841800c1065SThomas Huth 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1842800c1065SThomas Huth 
1843a76ccff6SThomas Huth 	do {
18443fb4c40fSThomas Huth 		rc = vcpu_pre_run(vcpu);
18453fb4c40fSThomas Huth 		if (rc)
1846a76ccff6SThomas Huth 			break;
18473fb4c40fSThomas Huth 
1848800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
18493fb4c40fSThomas Huth 		/*
1850a76ccff6SThomas Huth 		 * As PF_VCPU will be used in fault handler, between
1851a76ccff6SThomas Huth 		 * guest_enter and guest_exit should be no uaccess.
18523fb4c40fSThomas Huth 		 */
18533fb4c40fSThomas Huth 		preempt_disable();
18543fb4c40fSThomas Huth 		kvm_guest_enter();
18553fb4c40fSThomas Huth 		preempt_enable();
1856a76ccff6SThomas Huth 		exit_reason = sie64a(vcpu->arch.sie_block,
1857a76ccff6SThomas Huth 				     vcpu->run->s.regs.gprs);
18583fb4c40fSThomas Huth 		kvm_guest_exit();
1859800c1065SThomas Huth 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
18603fb4c40fSThomas Huth 
18613fb4c40fSThomas Huth 		rc = vcpu_post_run(vcpu, exit_reason);
186227291e21SDavid Hildenbrand 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
18633fb4c40fSThomas Huth 
1864800c1065SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1865e168bf8dSCarsten Otte 	return rc;
1866b0c632dbSHeiko Carstens }
1867b0c632dbSHeiko Carstens 
1868b028ee3eSDavid Hildenbrand static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1869b028ee3eSDavid Hildenbrand {
1870b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
1871b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
1872b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
1873b028ee3eSDavid Hildenbrand 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
1874b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
1875b028ee3eSDavid Hildenbrand 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
1876d3d692c8SDavid Hildenbrand 		/* some control register changes require a tlb flush */
1877d3d692c8SDavid Hildenbrand 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1878b028ee3eSDavid Hildenbrand 	}
1879b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
1880b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
1881b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
1882b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
1883b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
1884b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
1885b028ee3eSDavid Hildenbrand 	}
1886b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
1887b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
1888b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
1889b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
18909fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
18919fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
1892b028ee3eSDavid Hildenbrand 	}
1893b028ee3eSDavid Hildenbrand 	kvm_run->kvm_dirty_regs = 0;
1894b028ee3eSDavid Hildenbrand }
1895b028ee3eSDavid Hildenbrand 
1896b028ee3eSDavid Hildenbrand static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1897b028ee3eSDavid Hildenbrand {
1898b028ee3eSDavid Hildenbrand 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
1899b028ee3eSDavid Hildenbrand 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
1900b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
1901b028ee3eSDavid Hildenbrand 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
1902b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
1903b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
1904b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
1905b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
1906b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
1907b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
1908b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
1909b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
1910b028ee3eSDavid Hildenbrand }
1911b028ee3eSDavid Hildenbrand 
1912b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1913b0c632dbSHeiko Carstens {
19148f2abe6aSChristian Borntraeger 	int rc;
1915b0c632dbSHeiko Carstens 	sigset_t sigsaved;
1916b0c632dbSHeiko Carstens 
191727291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu)) {
191827291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
191927291e21SDavid Hildenbrand 		return 0;
192027291e21SDavid Hildenbrand 	}
192127291e21SDavid Hildenbrand 
1922b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
1923b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1924b0c632dbSHeiko Carstens 
19256352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
19266852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
19276352e4d2SDavid Hildenbrand 	} else if (is_vcpu_stopped(vcpu)) {
19286352e4d2SDavid Hildenbrand 		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
19296352e4d2SDavid Hildenbrand 				   vcpu->vcpu_id);
19306352e4d2SDavid Hildenbrand 		return -EINVAL;
19316352e4d2SDavid Hildenbrand 	}
1932b0c632dbSHeiko Carstens 
1933b028ee3eSDavid Hildenbrand 	sync_regs(vcpu, kvm_run);
1934d7b0b5ebSCarsten Otte 
1935dab4079dSHeiko Carstens 	might_fault();
1936e168bf8dSCarsten Otte 	rc = __vcpu_run(vcpu);
19379ace903dSChristian Ehrhardt 
1938b1d16c49SChristian Ehrhardt 	if (signal_pending(current) && !rc) {
1939b1d16c49SChristian Ehrhardt 		kvm_run->exit_reason = KVM_EXIT_INTR;
19408f2abe6aSChristian Borntraeger 		rc = -EINTR;
1941b1d16c49SChristian Ehrhardt 	}
19428f2abe6aSChristian Borntraeger 
194327291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu) && !rc)  {
194427291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
194527291e21SDavid Hildenbrand 		rc = 0;
194627291e21SDavid Hildenbrand 	}
194727291e21SDavid Hildenbrand 
1948b8e660b8SHeiko Carstens 	if (rc == -EOPNOTSUPP) {
19498f2abe6aSChristian Borntraeger 		/* intercept cannot be handled in-kernel, prepare kvm-run */
19508f2abe6aSChristian Borntraeger 		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
19518f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
19528f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
19538f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
19548f2abe6aSChristian Borntraeger 		rc = 0;
19558f2abe6aSChristian Borntraeger 	}
19568f2abe6aSChristian Borntraeger 
19578f2abe6aSChristian Borntraeger 	if (rc == -EREMOTE) {
19588f2abe6aSChristian Borntraeger 		/* intercept was handled, but userspace support is needed
19598f2abe6aSChristian Borntraeger 		 * kvm_run has been prepared by the handler */
19608f2abe6aSChristian Borntraeger 		rc = 0;
19618f2abe6aSChristian Borntraeger 	}
19628f2abe6aSChristian Borntraeger 
1963b028ee3eSDavid Hildenbrand 	store_regs(vcpu, kvm_run);
1964d7b0b5ebSCarsten Otte 
1965b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
1966b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1967b0c632dbSHeiko Carstens 
1968b0c632dbSHeiko Carstens 	vcpu->stat.exit_userspace++;
19697e8e6ab4SHeiko Carstens 	return rc;
1970b0c632dbSHeiko Carstens }
1971b0c632dbSHeiko Carstens 
1972b0c632dbSHeiko Carstens /*
1973b0c632dbSHeiko Carstens  * store status at address
1974b0c632dbSHeiko Carstens  * we use have two special cases:
1975b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
1976b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
1977b0c632dbSHeiko Carstens  */
1978d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
1979b0c632dbSHeiko Carstens {
1980092670cdSCarsten Otte 	unsigned char archmode = 1;
1981fda902cbSMichael Mueller 	unsigned int px;
1982178bd789SThomas Huth 	u64 clkcomp;
1983d0bce605SHeiko Carstens 	int rc;
1984b0c632dbSHeiko Carstens 
1985d0bce605SHeiko Carstens 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
1986d0bce605SHeiko Carstens 		if (write_guest_abs(vcpu, 163, &archmode, 1))
1987b0c632dbSHeiko Carstens 			return -EFAULT;
1988d0bce605SHeiko Carstens 		gpa = SAVE_AREA_BASE;
1989d0bce605SHeiko Carstens 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
1990d0bce605SHeiko Carstens 		if (write_guest_real(vcpu, 163, &archmode, 1))
1991b0c632dbSHeiko Carstens 			return -EFAULT;
1992d0bce605SHeiko Carstens 		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
1993d0bce605SHeiko Carstens 	}
1994d0bce605SHeiko Carstens 	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
1995d0bce605SHeiko Carstens 			     vcpu->arch.guest_fpregs.fprs, 128);
1996d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
1997d0bce605SHeiko Carstens 			      vcpu->run->s.regs.gprs, 128);
1998d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
1999d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gpsw, 16);
2000fda902cbSMichael Mueller 	px = kvm_s390_get_prefix(vcpu);
2001d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
2002fda902cbSMichael Mueller 			      &px, 4);
2003d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu,
2004d0bce605SHeiko Carstens 			      gpa + offsetof(struct save_area, fp_ctrl_reg),
2005d0bce605SHeiko Carstens 			      &vcpu->arch.guest_fpregs.fpc, 4);
2006d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
2007d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->todpr, 4);
2008d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
2009d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->cputm, 8);
2010178bd789SThomas Huth 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
2011d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
2012d0bce605SHeiko Carstens 			      &clkcomp, 8);
2013d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
2014d0bce605SHeiko Carstens 			      &vcpu->run->s.regs.acrs, 64);
2015d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
2016d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gcr, 128);
2017d0bce605SHeiko Carstens 	return rc ? -EFAULT : 0;
2018b0c632dbSHeiko Carstens }
2019b0c632dbSHeiko Carstens 
2020e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2021e879892cSThomas Huth {
2022e879892cSThomas Huth 	/*
2023e879892cSThomas Huth 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2024e879892cSThomas Huth 	 * copying in vcpu load/put. Lets update our copies before we save
2025e879892cSThomas Huth 	 * it into the save area
2026e879892cSThomas Huth 	 */
2027e879892cSThomas Huth 	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
2028e879892cSThomas Huth 	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
2029e879892cSThomas Huth 	save_access_regs(vcpu->run->s.regs.acrs);
2030e879892cSThomas Huth 
2031e879892cSThomas Huth 	return kvm_s390_store_status_unloaded(vcpu, addr);
2032e879892cSThomas Huth }
2033e879892cSThomas Huth 
2034*bc17de7cSEric Farman /*
2035*bc17de7cSEric Farman  * store additional status at address
2036*bc17de7cSEric Farman  */
2037*bc17de7cSEric Farman int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2038*bc17de7cSEric Farman 					unsigned long gpa)
2039*bc17de7cSEric Farman {
2040*bc17de7cSEric Farman 	/* Only bits 0-53 are used for address formation */
2041*bc17de7cSEric Farman 	if (!(gpa & ~0x3ff))
2042*bc17de7cSEric Farman 		return 0;
2043*bc17de7cSEric Farman 
2044*bc17de7cSEric Farman 	return write_guest_abs(vcpu, gpa & ~0x3ff,
2045*bc17de7cSEric Farman 			       (void *)&vcpu->run->s.regs.vrs, 512);
2046*bc17de7cSEric Farman }
2047*bc17de7cSEric Farman 
2048*bc17de7cSEric Farman int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2049*bc17de7cSEric Farman {
2050*bc17de7cSEric Farman 	if (!test_kvm_facility(vcpu->kvm, 129))
2051*bc17de7cSEric Farman 		return 0;
2052*bc17de7cSEric Farman 
2053*bc17de7cSEric Farman 	/*
2054*bc17de7cSEric Farman 	 * The guest VXRS are in the host VXRs due to the lazy
2055*bc17de7cSEric Farman 	 * copying in vcpu load/put. Let's update our copies before we save
2056*bc17de7cSEric Farman 	 * it into the save area.
2057*bc17de7cSEric Farman 	 */
2058*bc17de7cSEric Farman 	save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
2059*bc17de7cSEric Farman 
2060*bc17de7cSEric Farman 	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2061*bc17de7cSEric Farman }
2062*bc17de7cSEric Farman 
20638ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
20648ad35755SDavid Hildenbrand {
20658ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
20668ad35755SDavid Hildenbrand 	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
20678ad35755SDavid Hildenbrand 	exit_sie_sync(vcpu);
20688ad35755SDavid Hildenbrand }
20698ad35755SDavid Hildenbrand 
20708ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
20718ad35755SDavid Hildenbrand {
20728ad35755SDavid Hildenbrand 	unsigned int i;
20738ad35755SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
20748ad35755SDavid Hildenbrand 
20758ad35755SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
20768ad35755SDavid Hildenbrand 		__disable_ibs_on_vcpu(vcpu);
20778ad35755SDavid Hildenbrand 	}
20788ad35755SDavid Hildenbrand }
20798ad35755SDavid Hildenbrand 
20808ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
20818ad35755SDavid Hildenbrand {
20828ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
20838ad35755SDavid Hildenbrand 	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
20848ad35755SDavid Hildenbrand 	exit_sie_sync(vcpu);
20858ad35755SDavid Hildenbrand }
20868ad35755SDavid Hildenbrand 
20876852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
20886852d7b6SDavid Hildenbrand {
20898ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
20908ad35755SDavid Hildenbrand 
20918ad35755SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
20928ad35755SDavid Hildenbrand 		return;
20938ad35755SDavid Hildenbrand 
20946852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
20958ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
2096433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
20978ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
20988ad35755SDavid Hildenbrand 
20998ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
21008ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
21018ad35755SDavid Hildenbrand 			started_vcpus++;
21028ad35755SDavid Hildenbrand 	}
21038ad35755SDavid Hildenbrand 
21048ad35755SDavid Hildenbrand 	if (started_vcpus == 0) {
21058ad35755SDavid Hildenbrand 		/* we're the only active VCPU -> speed it up */
21068ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(vcpu);
21078ad35755SDavid Hildenbrand 	} else if (started_vcpus == 1) {
21088ad35755SDavid Hildenbrand 		/*
21098ad35755SDavid Hildenbrand 		 * As we are starting a second VCPU, we have to disable
21108ad35755SDavid Hildenbrand 		 * the IBS facility on all VCPUs to remove potentially
21118ad35755SDavid Hildenbrand 		 * oustanding ENABLE requests.
21128ad35755SDavid Hildenbrand 		 */
21138ad35755SDavid Hildenbrand 		__disable_ibs_on_all_vcpus(vcpu->kvm);
21148ad35755SDavid Hildenbrand 	}
21158ad35755SDavid Hildenbrand 
21166852d7b6SDavid Hildenbrand 	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
21178ad35755SDavid Hildenbrand 	/*
21188ad35755SDavid Hildenbrand 	 * Another VCPU might have used IBS while we were offline.
21198ad35755SDavid Hildenbrand 	 * Let's play safe and flush the VCPU at startup.
21208ad35755SDavid Hildenbrand 	 */
2121d3d692c8SDavid Hildenbrand 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2122433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
21238ad35755SDavid Hildenbrand 	return;
21246852d7b6SDavid Hildenbrand }
21256852d7b6SDavid Hildenbrand 
21266852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
21276852d7b6SDavid Hildenbrand {
21288ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
21298ad35755SDavid Hildenbrand 	struct kvm_vcpu *started_vcpu = NULL;
21308ad35755SDavid Hildenbrand 
21318ad35755SDavid Hildenbrand 	if (is_vcpu_stopped(vcpu))
21328ad35755SDavid Hildenbrand 		return;
21338ad35755SDavid Hildenbrand 
21346852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
21358ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
2136433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
21378ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
21388ad35755SDavid Hildenbrand 
213932f5ff63SDavid Hildenbrand 	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
21406cddd432SDavid Hildenbrand 	kvm_s390_clear_stop_irq(vcpu);
214132f5ff63SDavid Hildenbrand 
21426cddd432SDavid Hildenbrand 	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
21438ad35755SDavid Hildenbrand 	__disable_ibs_on_vcpu(vcpu);
21448ad35755SDavid Hildenbrand 
21458ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
21468ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
21478ad35755SDavid Hildenbrand 			started_vcpus++;
21488ad35755SDavid Hildenbrand 			started_vcpu = vcpu->kvm->vcpus[i];
21498ad35755SDavid Hildenbrand 		}
21508ad35755SDavid Hildenbrand 	}
21518ad35755SDavid Hildenbrand 
21528ad35755SDavid Hildenbrand 	if (started_vcpus == 1) {
21538ad35755SDavid Hildenbrand 		/*
21548ad35755SDavid Hildenbrand 		 * As we only have one VCPU left, we want to enable the
21558ad35755SDavid Hildenbrand 		 * IBS facility for that VCPU to speed it up.
21568ad35755SDavid Hildenbrand 		 */
21578ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(started_vcpu);
21588ad35755SDavid Hildenbrand 	}
21598ad35755SDavid Hildenbrand 
2160433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
21618ad35755SDavid Hildenbrand 	return;
21626852d7b6SDavid Hildenbrand }
21636852d7b6SDavid Hildenbrand 
2164d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2165d6712df9SCornelia Huck 				     struct kvm_enable_cap *cap)
2166d6712df9SCornelia Huck {
2167d6712df9SCornelia Huck 	int r;
2168d6712df9SCornelia Huck 
2169d6712df9SCornelia Huck 	if (cap->flags)
2170d6712df9SCornelia Huck 		return -EINVAL;
2171d6712df9SCornelia Huck 
2172d6712df9SCornelia Huck 	switch (cap->cap) {
2173fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
2174fa6b7fe9SCornelia Huck 		if (!vcpu->kvm->arch.css_support) {
2175fa6b7fe9SCornelia Huck 			vcpu->kvm->arch.css_support = 1;
2176fa6b7fe9SCornelia Huck 			trace_kvm_s390_enable_css(vcpu->kvm);
2177fa6b7fe9SCornelia Huck 		}
2178fa6b7fe9SCornelia Huck 		r = 0;
2179fa6b7fe9SCornelia Huck 		break;
2180d6712df9SCornelia Huck 	default:
2181d6712df9SCornelia Huck 		r = -EINVAL;
2182d6712df9SCornelia Huck 		break;
2183d6712df9SCornelia Huck 	}
2184d6712df9SCornelia Huck 	return r;
2185d6712df9SCornelia Huck }
2186d6712df9SCornelia Huck 
2187b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp,
2188b0c632dbSHeiko Carstens 			 unsigned int ioctl, unsigned long arg)
2189b0c632dbSHeiko Carstens {
2190b0c632dbSHeiko Carstens 	struct kvm_vcpu *vcpu = filp->private_data;
2191b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
2192800c1065SThomas Huth 	int idx;
2193bc923cc9SAvi Kivity 	long r;
2194b0c632dbSHeiko Carstens 
219593736624SAvi Kivity 	switch (ioctl) {
219693736624SAvi Kivity 	case KVM_S390_INTERRUPT: {
2197ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
2198383d0b05SJens Freimann 		struct kvm_s390_irq s390irq;
2199ba5c1e9bSCarsten Otte 
220093736624SAvi Kivity 		r = -EFAULT;
2201ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
220293736624SAvi Kivity 			break;
2203383d0b05SJens Freimann 		if (s390int_to_s390irq(&s390int, &s390irq))
2204383d0b05SJens Freimann 			return -EINVAL;
2205383d0b05SJens Freimann 		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
220693736624SAvi Kivity 		break;
2207ba5c1e9bSCarsten Otte 	}
2208b0c632dbSHeiko Carstens 	case KVM_S390_STORE_STATUS:
2209800c1065SThomas Huth 		idx = srcu_read_lock(&vcpu->kvm->srcu);
2210bc923cc9SAvi Kivity 		r = kvm_s390_vcpu_store_status(vcpu, arg);
2211800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
2212bc923cc9SAvi Kivity 		break;
2213b0c632dbSHeiko Carstens 	case KVM_S390_SET_INITIAL_PSW: {
2214b0c632dbSHeiko Carstens 		psw_t psw;
2215b0c632dbSHeiko Carstens 
2216bc923cc9SAvi Kivity 		r = -EFAULT;
2217b0c632dbSHeiko Carstens 		if (copy_from_user(&psw, argp, sizeof(psw)))
2218bc923cc9SAvi Kivity 			break;
2219bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2220bc923cc9SAvi Kivity 		break;
2221b0c632dbSHeiko Carstens 	}
2222b0c632dbSHeiko Carstens 	case KVM_S390_INITIAL_RESET:
2223bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2224bc923cc9SAvi Kivity 		break;
222514eebd91SCarsten Otte 	case KVM_SET_ONE_REG:
222614eebd91SCarsten Otte 	case KVM_GET_ONE_REG: {
222714eebd91SCarsten Otte 		struct kvm_one_reg reg;
222814eebd91SCarsten Otte 		r = -EFAULT;
222914eebd91SCarsten Otte 		if (copy_from_user(&reg, argp, sizeof(reg)))
223014eebd91SCarsten Otte 			break;
223114eebd91SCarsten Otte 		if (ioctl == KVM_SET_ONE_REG)
223214eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
223314eebd91SCarsten Otte 		else
223414eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
223514eebd91SCarsten Otte 		break;
223614eebd91SCarsten Otte 	}
223727e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
223827e0393fSCarsten Otte 	case KVM_S390_UCAS_MAP: {
223927e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
224027e0393fSCarsten Otte 
224127e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
224227e0393fSCarsten Otte 			r = -EFAULT;
224327e0393fSCarsten Otte 			break;
224427e0393fSCarsten Otte 		}
224527e0393fSCarsten Otte 
224627e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
224727e0393fSCarsten Otte 			r = -EINVAL;
224827e0393fSCarsten Otte 			break;
224927e0393fSCarsten Otte 		}
225027e0393fSCarsten Otte 
225127e0393fSCarsten Otte 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
225227e0393fSCarsten Otte 				     ucasmap.vcpu_addr, ucasmap.length);
225327e0393fSCarsten Otte 		break;
225427e0393fSCarsten Otte 	}
225527e0393fSCarsten Otte 	case KVM_S390_UCAS_UNMAP: {
225627e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
225727e0393fSCarsten Otte 
225827e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
225927e0393fSCarsten Otte 			r = -EFAULT;
226027e0393fSCarsten Otte 			break;
226127e0393fSCarsten Otte 		}
226227e0393fSCarsten Otte 
226327e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
226427e0393fSCarsten Otte 			r = -EINVAL;
226527e0393fSCarsten Otte 			break;
226627e0393fSCarsten Otte 		}
226727e0393fSCarsten Otte 
226827e0393fSCarsten Otte 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
226927e0393fSCarsten Otte 			ucasmap.length);
227027e0393fSCarsten Otte 		break;
227127e0393fSCarsten Otte 	}
227227e0393fSCarsten Otte #endif
2273ccc7910fSCarsten Otte 	case KVM_S390_VCPU_FAULT: {
2274527e30b4SMartin Schwidefsky 		r = gmap_fault(vcpu->arch.gmap, arg, 0);
2275ccc7910fSCarsten Otte 		break;
2276ccc7910fSCarsten Otte 	}
2277d6712df9SCornelia Huck 	case KVM_ENABLE_CAP:
2278d6712df9SCornelia Huck 	{
2279d6712df9SCornelia Huck 		struct kvm_enable_cap cap;
2280d6712df9SCornelia Huck 		r = -EFAULT;
2281d6712df9SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
2282d6712df9SCornelia Huck 			break;
2283d6712df9SCornelia Huck 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2284d6712df9SCornelia Huck 		break;
2285d6712df9SCornelia Huck 	}
2286b0c632dbSHeiko Carstens 	default:
22873e6afcf1SCarsten Otte 		r = -ENOTTY;
2288b0c632dbSHeiko Carstens 	}
2289bc923cc9SAvi Kivity 	return r;
2290b0c632dbSHeiko Carstens }
2291b0c632dbSHeiko Carstens 
22925b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
22935b1c1493SCarsten Otte {
22945b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
22955b1c1493SCarsten Otte 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
22965b1c1493SCarsten Otte 		 && (kvm_is_ucontrol(vcpu->kvm))) {
22975b1c1493SCarsten Otte 		vmf->page = virt_to_page(vcpu->arch.sie_block);
22985b1c1493SCarsten Otte 		get_page(vmf->page);
22995b1c1493SCarsten Otte 		return 0;
23005b1c1493SCarsten Otte 	}
23015b1c1493SCarsten Otte #endif
23025b1c1493SCarsten Otte 	return VM_FAULT_SIGBUS;
23035b1c1493SCarsten Otte }
23045b1c1493SCarsten Otte 
23055587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
23065587027cSAneesh Kumar K.V 			    unsigned long npages)
2307db3fe4ebSTakuya Yoshikawa {
2308db3fe4ebSTakuya Yoshikawa 	return 0;
2309db3fe4ebSTakuya Yoshikawa }
2310db3fe4ebSTakuya Yoshikawa 
2311b0c632dbSHeiko Carstens /* Section: memory related */
2312f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
2313f7784b8eSMarcelo Tosatti 				   struct kvm_memory_slot *memslot,
23147b6195a9STakuya Yoshikawa 				   struct kvm_userspace_memory_region *mem,
23157b6195a9STakuya Yoshikawa 				   enum kvm_mr_change change)
2316b0c632dbSHeiko Carstens {
2317dd2887e7SNick Wang 	/* A few sanity checks. We can have memory slots which have to be
2318dd2887e7SNick Wang 	   located/ended at a segment boundary (1MB). The memory in userland is
2319dd2887e7SNick Wang 	   ok to be fragmented into various different vmas. It is okay to mmap()
2320dd2887e7SNick Wang 	   and munmap() stuff in this slot after doing this call at any time */
2321b0c632dbSHeiko Carstens 
2322598841caSCarsten Otte 	if (mem->userspace_addr & 0xffffful)
2323b0c632dbSHeiko Carstens 		return -EINVAL;
2324b0c632dbSHeiko Carstens 
2325598841caSCarsten Otte 	if (mem->memory_size & 0xffffful)
2326b0c632dbSHeiko Carstens 		return -EINVAL;
2327b0c632dbSHeiko Carstens 
2328f7784b8eSMarcelo Tosatti 	return 0;
2329f7784b8eSMarcelo Tosatti }
2330f7784b8eSMarcelo Tosatti 
2331f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
2332f7784b8eSMarcelo Tosatti 				struct kvm_userspace_memory_region *mem,
23338482644aSTakuya Yoshikawa 				const struct kvm_memory_slot *old,
23348482644aSTakuya Yoshikawa 				enum kvm_mr_change change)
2335f7784b8eSMarcelo Tosatti {
2336f7850c92SCarsten Otte 	int rc;
2337f7784b8eSMarcelo Tosatti 
23382cef4debSChristian Borntraeger 	/* If the basics of the memslot do not change, we do not want
23392cef4debSChristian Borntraeger 	 * to update the gmap. Every update causes several unnecessary
23402cef4debSChristian Borntraeger 	 * segment translation exceptions. This is usually handled just
23412cef4debSChristian Borntraeger 	 * fine by the normal fault handler + gmap, but it will also
23422cef4debSChristian Borntraeger 	 * cause faults on the prefix page of running guest CPUs.
23432cef4debSChristian Borntraeger 	 */
23442cef4debSChristian Borntraeger 	if (old->userspace_addr == mem->userspace_addr &&
23452cef4debSChristian Borntraeger 	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
23462cef4debSChristian Borntraeger 	    old->npages * PAGE_SIZE == mem->memory_size)
23472cef4debSChristian Borntraeger 		return;
2348598841caSCarsten Otte 
2349598841caSCarsten Otte 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2350598841caSCarsten Otte 		mem->guest_phys_addr, mem->memory_size);
2351598841caSCarsten Otte 	if (rc)
2352f7850c92SCarsten Otte 		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
2353598841caSCarsten Otte 	return;
2354b0c632dbSHeiko Carstens }
2355b0c632dbSHeiko Carstens 
2356b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
2357b0c632dbSHeiko Carstens {
23589d8d5786SMichael Mueller 	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
2359b0c632dbSHeiko Carstens }
2360b0c632dbSHeiko Carstens 
2361b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void)
2362b0c632dbSHeiko Carstens {
2363b0c632dbSHeiko Carstens 	kvm_exit();
2364b0c632dbSHeiko Carstens }
2365b0c632dbSHeiko Carstens 
2366b0c632dbSHeiko Carstens module_init(kvm_s390_init);
2367b0c632dbSHeiko Carstens module_exit(kvm_s390_exit);
2368566af940SCornelia Huck 
2369566af940SCornelia Huck /*
2370566af940SCornelia Huck  * Enable autoloading of the kvm module.
2371566af940SCornelia Huck  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2372566af940SCornelia Huck  * since x86 takes a different approach.
2373566af940SCornelia Huck  */
2374566af940SCornelia Huck #include <linux/miscdevice.h>
2375566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR);
2376566af940SCornelia Huck MODULE_ALIAS("devname:kvm");
2377