xref: /openbmc/linux/arch/s390/kvm/kvm-s390.c (revision 13211ea7b47db3d8ee2ff258a9a973a6d3aa3d43)
1b0c632dbSHeiko Carstens /*
2a53c8fabSHeiko Carstens  * hosting zSeries kernel virtual machines
3b0c632dbSHeiko Carstens  *
4628eb9b8SChristian Ehrhardt  * Copyright IBM Corp. 2008, 2009
5b0c632dbSHeiko Carstens  *
6b0c632dbSHeiko Carstens  * This program is free software; you can redistribute it and/or modify
7b0c632dbSHeiko Carstens  * it under the terms of the GNU General Public License (version 2 only)
8b0c632dbSHeiko Carstens  * as published by the Free Software Foundation.
9b0c632dbSHeiko Carstens  *
10b0c632dbSHeiko Carstens  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11b0c632dbSHeiko Carstens  *               Christian Borntraeger <borntraeger@de.ibm.com>
12b0c632dbSHeiko Carstens  *               Heiko Carstens <heiko.carstens@de.ibm.com>
13628eb9b8SChristian Ehrhardt  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
1415f36ebdSJason J. Herne  *               Jason J. Herne <jjherne@us.ibm.com>
15b0c632dbSHeiko Carstens  */
16b0c632dbSHeiko Carstens 
17b0c632dbSHeiko Carstens #include <linux/compiler.h>
18b0c632dbSHeiko Carstens #include <linux/err.h>
19b0c632dbSHeiko Carstens #include <linux/fs.h>
20ca872302SChristian Borntraeger #include <linux/hrtimer.h>
21b0c632dbSHeiko Carstens #include <linux/init.h>
22b0c632dbSHeiko Carstens #include <linux/kvm.h>
23b0c632dbSHeiko Carstens #include <linux/kvm_host.h>
24b0c632dbSHeiko Carstens #include <linux/module.h>
25a374e892STony Krowiak #include <linux/random.h>
26b0c632dbSHeiko Carstens #include <linux/slab.h>
27ba5c1e9bSCarsten Otte #include <linux/timer.h>
28cbb870c8SHeiko Carstens #include <asm/asm-offsets.h>
29b0c632dbSHeiko Carstens #include <asm/lowcore.h>
30b0c632dbSHeiko Carstens #include <asm/pgtable.h>
31f5daba1dSHeiko Carstens #include <asm/nmi.h>
32a0616cdeSDavid Howells #include <asm/switch_to.h>
331526bf9cSChristian Borntraeger #include <asm/sclp.h>
348f2abe6aSChristian Borntraeger #include "kvm-s390.h"
35b0c632dbSHeiko Carstens #include "gaccess.h"
36b0c632dbSHeiko Carstens 
375786fffaSCornelia Huck #define CREATE_TRACE_POINTS
385786fffaSCornelia Huck #include "trace.h"
39ade38c31SCornelia Huck #include "trace-s390.h"
405786fffaSCornelia Huck 
41b0c632dbSHeiko Carstens #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
42b0c632dbSHeiko Carstens 
43b0c632dbSHeiko Carstens struct kvm_stats_debugfs_item debugfs_entries[] = {
44b0c632dbSHeiko Carstens 	{ "userspace_handled", VCPU_STAT(exit_userspace) },
450eaeafa1SChristian Borntraeger 	{ "exit_null", VCPU_STAT(exit_null) },
468f2abe6aSChristian Borntraeger 	{ "exit_validity", VCPU_STAT(exit_validity) },
478f2abe6aSChristian Borntraeger 	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
488f2abe6aSChristian Borntraeger 	{ "exit_external_request", VCPU_STAT(exit_external_request) },
498f2abe6aSChristian Borntraeger 	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
50ba5c1e9bSCarsten Otte 	{ "exit_instruction", VCPU_STAT(exit_instruction) },
51ba5c1e9bSCarsten Otte 	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
52ba5c1e9bSCarsten Otte 	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
53f7819512SPaolo Bonzini 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
54ce2e4f0bSDavid Hildenbrand 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
55f5e10b09SChristian Borntraeger 	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
56ba5c1e9bSCarsten Otte 	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
57aba07508SDavid Hildenbrand 	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
58aba07508SDavid Hildenbrand 	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
59ba5c1e9bSCarsten Otte 	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
607697e71fSChristian Ehrhardt 	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
61ba5c1e9bSCarsten Otte 	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
62ba5c1e9bSCarsten Otte 	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
63ba5c1e9bSCarsten Otte 	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
64ba5c1e9bSCarsten Otte 	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
65ba5c1e9bSCarsten Otte 	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
66ba5c1e9bSCarsten Otte 	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
67ba5c1e9bSCarsten Otte 	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
6869d0d3a3SChristian Borntraeger 	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
69453423dcSChristian Borntraeger 	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
70453423dcSChristian Borntraeger 	{ "instruction_spx", VCPU_STAT(instruction_spx) },
71453423dcSChristian Borntraeger 	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
72453423dcSChristian Borntraeger 	{ "instruction_stap", VCPU_STAT(instruction_stap) },
73453423dcSChristian Borntraeger 	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
748a242234SHeiko Carstens 	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
75453423dcSChristian Borntraeger 	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
76453423dcSChristian Borntraeger 	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
77b31288faSKonstantin Weitz 	{ "instruction_essa", VCPU_STAT(instruction_essa) },
78453423dcSChristian Borntraeger 	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
79453423dcSChristian Borntraeger 	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
80bb25b9baSChristian Borntraeger 	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
815288fbf0SChristian Borntraeger 	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
82bd59d3a4SCornelia Huck 	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
837697e71fSChristian Ehrhardt 	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
845288fbf0SChristian Borntraeger 	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
8542cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
8642cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
875288fbf0SChristian Borntraeger 	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
8842cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
8942cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
90cd7b4b61SEric Farman 	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
915288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
925288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
935288fbf0SChristian Borntraeger 	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
9442cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
9542cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
9642cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
97388186bcSChristian Borntraeger 	{ "diagnose_10", VCPU_STAT(diagnose_10) },
98e28acfeaSChristian Borntraeger 	{ "diagnose_44", VCPU_STAT(diagnose_44) },
9941628d33SKonstantin Weitz 	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
100b0c632dbSHeiko Carstens 	{ NULL }
101b0c632dbSHeiko Carstens };
102b0c632dbSHeiko Carstens 
1039d8d5786SMichael Mueller /* upper facilities limit for kvm */
1049d8d5786SMichael Mueller unsigned long kvm_s390_fac_list_mask[] = {
1059d8d5786SMichael Mueller 	0xff82fffbf4fc2000UL,
1069d8d5786SMichael Mueller 	0x005c000000000000UL,
107*13211ea7SEric Farman 	0x4000000000000000UL,
1089d8d5786SMichael Mueller };
109b0c632dbSHeiko Carstens 
1109d8d5786SMichael Mueller unsigned long kvm_s390_fac_list_mask_size(void)
11178c4b59fSMichael Mueller {
1129d8d5786SMichael Mueller 	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
1139d8d5786SMichael Mueller 	return ARRAY_SIZE(kvm_s390_fac_list_mask);
11478c4b59fSMichael Mueller }
11578c4b59fSMichael Mueller 
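/*
 * The mask above follows the STFLE layout: facility bit 0 is the most
 * significant bit of the first doubleword. A minimal sketch of that lookup,
 * using only the declarations above; the helper name is hypothetical, the
 * kernel itself tests facility bits byte-wise via __test_facility().
 */
static inline int kvm_s390_fac_mask_test_nr(unsigned long nr)
{
	if (nr >= kvm_s390_fac_list_mask_size() * 64)
		return 0;
	/* e.g. nr == 76 (MSA extension 3) selects bit 51 of mask word 1 */
	return (kvm_s390_fac_list_mask[nr / 64] >> (63 - (nr % 64))) & 1;
}
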
1169d8d5786SMichael Mueller static struct gmap_notifier gmap_notifier;
1179d8d5786SMichael Mueller 
118b0c632dbSHeiko Carstens /* Section: not file related */
11913a34e06SRadim Krčmář int kvm_arch_hardware_enable(void)
120b0c632dbSHeiko Carstens {
121b0c632dbSHeiko Carstens 	/* every s390 is virtualization enabled ;-) */
12210474ae8SAlexander Graf 	return 0;
123b0c632dbSHeiko Carstens }
124b0c632dbSHeiko Carstens 
1252c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
1262c70fe44SChristian Borntraeger 
127b0c632dbSHeiko Carstens int kvm_arch_hardware_setup(void)
128b0c632dbSHeiko Carstens {
1292c70fe44SChristian Borntraeger 	gmap_notifier.notifier_call = kvm_gmap_notifier;
1302c70fe44SChristian Borntraeger 	gmap_register_ipte_notifier(&gmap_notifier);
131b0c632dbSHeiko Carstens 	return 0;
132b0c632dbSHeiko Carstens }
133b0c632dbSHeiko Carstens 
134b0c632dbSHeiko Carstens void kvm_arch_hardware_unsetup(void)
135b0c632dbSHeiko Carstens {
1362c70fe44SChristian Borntraeger 	gmap_unregister_ipte_notifier(&gmap_notifier);
137b0c632dbSHeiko Carstens }
138b0c632dbSHeiko Carstens 
139b0c632dbSHeiko Carstens int kvm_arch_init(void *opaque)
140b0c632dbSHeiko Carstens {
14184877d93SCornelia Huck 	/* Register floating interrupt controller interface. */
14284877d93SCornelia Huck 	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
143b0c632dbSHeiko Carstens }
144b0c632dbSHeiko Carstens 
145b0c632dbSHeiko Carstens /* Section: device related */
146b0c632dbSHeiko Carstens long kvm_arch_dev_ioctl(struct file *filp,
147b0c632dbSHeiko Carstens 			unsigned int ioctl, unsigned long arg)
148b0c632dbSHeiko Carstens {
149b0c632dbSHeiko Carstens 	if (ioctl == KVM_S390_ENABLE_SIE)
150b0c632dbSHeiko Carstens 		return s390_enable_sie();
151b0c632dbSHeiko Carstens 	return -EINVAL;
152b0c632dbSHeiko Carstens }
153b0c632dbSHeiko Carstens 
154784aa3d7SAlexander Graf int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
155b0c632dbSHeiko Carstens {
156d7b0b5ebSCarsten Otte 	int r;
157d7b0b5ebSCarsten Otte 
1582bd0ac4eSCarsten Otte 	switch (ext) {
159d7b0b5ebSCarsten Otte 	case KVM_CAP_S390_PSW:
160b6cf8788SChristian Borntraeger 	case KVM_CAP_S390_GMAP:
16152e16b18SChristian Borntraeger 	case KVM_CAP_SYNC_MMU:
1621efd0f59SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
1631efd0f59SCarsten Otte 	case KVM_CAP_S390_UCONTROL:
1641efd0f59SCarsten Otte #endif
1653c038e6bSDominik Dingel 	case KVM_CAP_ASYNC_PF:
16660b413c9SChristian Borntraeger 	case KVM_CAP_SYNC_REGS:
16714eebd91SCarsten Otte 	case KVM_CAP_ONE_REG:
168d6712df9SCornelia Huck 	case KVM_CAP_ENABLE_CAP:
169fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
170ebc32262SCornelia Huck 	case KVM_CAP_IRQFD:
17110ccaa1eSCornelia Huck 	case KVM_CAP_IOEVENTFD:
172c05c4186SJens Freimann 	case KVM_CAP_DEVICE_CTRL:
173d938dc55SCornelia Huck 	case KVM_CAP_ENABLE_CAP_VM:
17478599d90SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
175f2061656SDominik Dingel 	case KVM_CAP_VM_ATTRIBUTES:
1766352e4d2SDavid Hildenbrand 	case KVM_CAP_MP_STATE:
1772444b352SDavid Hildenbrand 	case KVM_CAP_S390_USER_SIGP:
178d7b0b5ebSCarsten Otte 		r = 1;
179d7b0b5ebSCarsten Otte 		break;
180e726b1bdSChristian Borntraeger 	case KVM_CAP_NR_VCPUS:
181e726b1bdSChristian Borntraeger 	case KVM_CAP_MAX_VCPUS:
182e726b1bdSChristian Borntraeger 		r = KVM_MAX_VCPUS;
183e726b1bdSChristian Borntraeger 		break;
184e1e2e605SNick Wang 	case KVM_CAP_NR_MEMSLOTS:
185e1e2e605SNick Wang 		r = KVM_USER_MEM_SLOTS;
186e1e2e605SNick Wang 		break;
1871526bf9cSChristian Borntraeger 	case KVM_CAP_S390_COW:
188abf09bedSMartin Schwidefsky 		r = MACHINE_HAS_ESOP;
1891526bf9cSChristian Borntraeger 		break;
19068c55750SEric Farman 	case KVM_CAP_S390_VECTOR_REGISTERS:
19168c55750SEric Farman 		r = MACHINE_HAS_VX;
19268c55750SEric Farman 		break;
1932bd0ac4eSCarsten Otte 	default:
194d7b0b5ebSCarsten Otte 		r = 0;
195b0c632dbSHeiko Carstens 	}
196d7b0b5ebSCarsten Otte 	return r;
1972bd0ac4eSCarsten Otte }
198b0c632dbSHeiko Carstens 
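/*
 * Minimal userspace sketch of probing one of the extensions reported above,
 * assuming <sys/ioctl.h> and the uapi definitions from <linux/kvm.h>; the
 * fd (either /dev/kvm or a VM descriptor) and the chosen capability are
 * illustrative only.
 */
static int example_has_user_sigp(int fd)
{
	/* returns 1 when KVM_CAP_S390_USER_SIGP is advertised, 0 otherwise */
	return ioctl(fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_USER_SIGP) == 1;
}
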
19915f36ebdSJason J. Herne static void kvm_s390_sync_dirty_log(struct kvm *kvm,
20015f36ebdSJason J. Herne 					struct kvm_memory_slot *memslot)
20115f36ebdSJason J. Herne {
20215f36ebdSJason J. Herne 	gfn_t cur_gfn, last_gfn;
20315f36ebdSJason J. Herne 	unsigned long address;
20415f36ebdSJason J. Herne 	struct gmap *gmap = kvm->arch.gmap;
20515f36ebdSJason J. Herne 
20615f36ebdSJason J. Herne 	down_read(&gmap->mm->mmap_sem);
20715f36ebdSJason J. Herne 	/* Loop over all guest pages */
20815f36ebdSJason J. Herne 	last_gfn = memslot->base_gfn + memslot->npages;
20915f36ebdSJason J. Herne 	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
21015f36ebdSJason J. Herne 		address = gfn_to_hva_memslot(memslot, cur_gfn);
21115f36ebdSJason J. Herne 
21215f36ebdSJason J. Herne 		if (gmap_test_and_clear_dirty(address, gmap))
21315f36ebdSJason J. Herne 			mark_page_dirty(kvm, cur_gfn);
21415f36ebdSJason J. Herne 	}
21515f36ebdSJason J. Herne 	up_read(&gmap->mm->mmap_sem);
21615f36ebdSJason J. Herne }
21715f36ebdSJason J. Herne 
218b0c632dbSHeiko Carstens /* Section: vm related */
219b0c632dbSHeiko Carstens /*
220b0c632dbSHeiko Carstens  * Get (and clear) the dirty memory log for a memory slot.
221b0c632dbSHeiko Carstens  */
222b0c632dbSHeiko Carstens int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
223b0c632dbSHeiko Carstens 			       struct kvm_dirty_log *log)
224b0c632dbSHeiko Carstens {
22515f36ebdSJason J. Herne 	int r;
22615f36ebdSJason J. Herne 	unsigned long n;
22715f36ebdSJason J. Herne 	struct kvm_memory_slot *memslot;
22815f36ebdSJason J. Herne 	int is_dirty = 0;
22915f36ebdSJason J. Herne 
23015f36ebdSJason J. Herne 	mutex_lock(&kvm->slots_lock);
23115f36ebdSJason J. Herne 
23215f36ebdSJason J. Herne 	r = -EINVAL;
23315f36ebdSJason J. Herne 	if (log->slot >= KVM_USER_MEM_SLOTS)
23415f36ebdSJason J. Herne 		goto out;
23515f36ebdSJason J. Herne 
23615f36ebdSJason J. Herne 	memslot = id_to_memslot(kvm->memslots, log->slot);
23715f36ebdSJason J. Herne 	r = -ENOENT;
23815f36ebdSJason J. Herne 	if (!memslot->dirty_bitmap)
23915f36ebdSJason J. Herne 		goto out;
24015f36ebdSJason J. Herne 
24115f36ebdSJason J. Herne 	kvm_s390_sync_dirty_log(kvm, memslot);
24215f36ebdSJason J. Herne 	r = kvm_get_dirty_log(kvm, log, &is_dirty);
24315f36ebdSJason J. Herne 	if (r)
24415f36ebdSJason J. Herne 		goto out;
24515f36ebdSJason J. Herne 
24615f36ebdSJason J. Herne 	/* Clear the dirty log */
24715f36ebdSJason J. Herne 	if (is_dirty) {
24815f36ebdSJason J. Herne 		n = kvm_dirty_bitmap_bytes(memslot);
24915f36ebdSJason J. Herne 		memset(memslot->dirty_bitmap, 0, n);
25015f36ebdSJason J. Herne 	}
25115f36ebdSJason J. Herne 	r = 0;
25215f36ebdSJason J. Herne out:
25315f36ebdSJason J. Herne 	mutex_unlock(&kvm->slots_lock);
25415f36ebdSJason J. Herne 	return r;
255b0c632dbSHeiko Carstens }
256b0c632dbSHeiko Carstens 
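/*
 * Minimal userspace sketch of the dirty-log retrieval handled above,
 * assuming struct kvm_dirty_log from <linux/kvm.h>; vm_fd, slot and the
 * caller-provided bitmap (one bit per guest page, zeroed beforehand) are
 * illustrative only.
 */
static int example_get_dirty_log(int vm_fd, __u32 slot, void *bitmap)
{
	struct kvm_dirty_log log = {
		.slot = slot,
		.dirty_bitmap = bitmap,
	};

	return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);	/* 0 on success */
}
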
257d938dc55SCornelia Huck static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
258d938dc55SCornelia Huck {
259d938dc55SCornelia Huck 	int r;
260d938dc55SCornelia Huck 
261d938dc55SCornelia Huck 	if (cap->flags)
262d938dc55SCornelia Huck 		return -EINVAL;
263d938dc55SCornelia Huck 
264d938dc55SCornelia Huck 	switch (cap->cap) {
26584223598SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
26684223598SCornelia Huck 		kvm->arch.use_irqchip = 1;
26784223598SCornelia Huck 		r = 0;
26884223598SCornelia Huck 		break;
2692444b352SDavid Hildenbrand 	case KVM_CAP_S390_USER_SIGP:
2702444b352SDavid Hildenbrand 		kvm->arch.user_sigp = 1;
2712444b352SDavid Hildenbrand 		r = 0;
2722444b352SDavid Hildenbrand 		break;
27368c55750SEric Farman 	case KVM_CAP_S390_VECTOR_REGISTERS:
27468c55750SEric Farman 		kvm->arch.use_vectors = MACHINE_HAS_VX;
27568c55750SEric Farman 		r = MACHINE_HAS_VX ? 0 : -EINVAL;
27668c55750SEric Farman 		break;
277d938dc55SCornelia Huck 	default:
278d938dc55SCornelia Huck 		r = -EINVAL;
279d938dc55SCornelia Huck 		break;
280d938dc55SCornelia Huck 	}
281d938dc55SCornelia Huck 	return r;
282d938dc55SCornelia Huck }
283d938dc55SCornelia Huck 
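/*
 * Minimal userspace sketch of enabling one of the VM capabilities handled
 * above, assuming struct kvm_enable_cap from <linux/kvm.h>; vm_fd is a
 * hypothetical VM file descriptor and flags must stay zero.
 */
static int example_enable_irqchip(int vm_fd)
{
	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_IRQCHIP };

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);	/* 0 on success */
}
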
2848c0a7ce6SDominik Dingel static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
2858c0a7ce6SDominik Dingel {
2868c0a7ce6SDominik Dingel 	int ret;
2878c0a7ce6SDominik Dingel 
2888c0a7ce6SDominik Dingel 	switch (attr->attr) {
2898c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE:
2908c0a7ce6SDominik Dingel 		ret = 0;
2918c0a7ce6SDominik Dingel 		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
2928c0a7ce6SDominik Dingel 			ret = -EFAULT;
2938c0a7ce6SDominik Dingel 		break;
2948c0a7ce6SDominik Dingel 	default:
2958c0a7ce6SDominik Dingel 		ret = -ENXIO;
2968c0a7ce6SDominik Dingel 		break;
2978c0a7ce6SDominik Dingel 	}
2988c0a7ce6SDominik Dingel 	return ret;
2998c0a7ce6SDominik Dingel }
3008c0a7ce6SDominik Dingel 
3018c0a7ce6SDominik Dingel static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
3024f718eabSDominik Dingel {
3034f718eabSDominik Dingel 	int ret;
3044f718eabSDominik Dingel 	unsigned int idx;
3054f718eabSDominik Dingel 	switch (attr->attr) {
3064f718eabSDominik Dingel 	case KVM_S390_VM_MEM_ENABLE_CMMA:
3074f718eabSDominik Dingel 		ret = -EBUSY;
3084f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
3094f718eabSDominik Dingel 		if (atomic_read(&kvm->online_vcpus) == 0) {
3104f718eabSDominik Dingel 			kvm->arch.use_cmma = 1;
3114f718eabSDominik Dingel 			ret = 0;
3124f718eabSDominik Dingel 		}
3134f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
3144f718eabSDominik Dingel 		break;
3154f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CLR_CMMA:
3164f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
3174f718eabSDominik Dingel 		idx = srcu_read_lock(&kvm->srcu);
318a13cff31SDominik Dingel 		s390_reset_cmma(kvm->arch.gmap->mm);
3194f718eabSDominik Dingel 		srcu_read_unlock(&kvm->srcu, idx);
3204f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
3214f718eabSDominik Dingel 		ret = 0;
3224f718eabSDominik Dingel 		break;
3238c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE: {
3248c0a7ce6SDominik Dingel 		unsigned long new_limit;
3258c0a7ce6SDominik Dingel 
3268c0a7ce6SDominik Dingel 		if (kvm_is_ucontrol(kvm))
3278c0a7ce6SDominik Dingel 			return -EINVAL;
3288c0a7ce6SDominik Dingel 
3298c0a7ce6SDominik Dingel 		if (get_user(new_limit, (u64 __user *)attr->addr))
3308c0a7ce6SDominik Dingel 			return -EFAULT;
3318c0a7ce6SDominik Dingel 
3328c0a7ce6SDominik Dingel 		if (new_limit > kvm->arch.gmap->asce_end)
3338c0a7ce6SDominik Dingel 			return -E2BIG;
3348c0a7ce6SDominik Dingel 
3358c0a7ce6SDominik Dingel 		ret = -EBUSY;
3368c0a7ce6SDominik Dingel 		mutex_lock(&kvm->lock);
3378c0a7ce6SDominik Dingel 		if (atomic_read(&kvm->online_vcpus) == 0) {
3388c0a7ce6SDominik Dingel 			/* gmap_alloc will round the limit up */
3398c0a7ce6SDominik Dingel 			struct gmap *new = gmap_alloc(current->mm, new_limit);
3408c0a7ce6SDominik Dingel 
3418c0a7ce6SDominik Dingel 			if (!new) {
3428c0a7ce6SDominik Dingel 				ret = -ENOMEM;
3438c0a7ce6SDominik Dingel 			} else {
3448c0a7ce6SDominik Dingel 				gmap_free(kvm->arch.gmap);
3458c0a7ce6SDominik Dingel 				new->private = kvm;
3468c0a7ce6SDominik Dingel 				kvm->arch.gmap = new;
3478c0a7ce6SDominik Dingel 				ret = 0;
3488c0a7ce6SDominik Dingel 			}
3498c0a7ce6SDominik Dingel 		}
3508c0a7ce6SDominik Dingel 		mutex_unlock(&kvm->lock);
3518c0a7ce6SDominik Dingel 		break;
3528c0a7ce6SDominik Dingel 	}
3534f718eabSDominik Dingel 	default:
3544f718eabSDominik Dingel 		ret = -ENXIO;
3554f718eabSDominik Dingel 		break;
3564f718eabSDominik Dingel 	}
3574f718eabSDominik Dingel 	return ret;
3584f718eabSDominik Dingel }
3594f718eabSDominik Dingel 
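/*
 * Minimal userspace sketch of driving the memory-control attributes above
 * through KVM_SET_DEVICE_ATTR, assuming struct kvm_device_attr and the
 * KVM_S390_VM_* constants from <linux/kvm.h>; vm_fd and the 4 GB limit are
 * illustrative, and the limit must be set before the first vcpu is created.
 */
static int example_set_mem_limit(int vm_fd)
{
	__u64 limit = 1ULL << 32;
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_MEM_CTRL,
		.attr  = KVM_S390_VM_MEM_LIMIT_SIZE,
		.addr  = (__u64)(unsigned long)&limit,
	};

	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}
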
360a374e892STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
361a374e892STony Krowiak 
362a374e892STony Krowiak static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
363a374e892STony Krowiak {
364a374e892STony Krowiak 	struct kvm_vcpu *vcpu;
365a374e892STony Krowiak 	int i;
366a374e892STony Krowiak 
3679d8d5786SMichael Mueller 	if (!test_kvm_facility(kvm, 76))
368a374e892STony Krowiak 		return -EINVAL;
369a374e892STony Krowiak 
370a374e892STony Krowiak 	mutex_lock(&kvm->lock);
371a374e892STony Krowiak 	switch (attr->attr) {
372a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
373a374e892STony Krowiak 		get_random_bytes(
374a374e892STony Krowiak 			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
375a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
376a374e892STony Krowiak 		kvm->arch.crypto.aes_kw = 1;
377a374e892STony Krowiak 		break;
378a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
379a374e892STony Krowiak 		get_random_bytes(
380a374e892STony Krowiak 			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
381a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
382a374e892STony Krowiak 		kvm->arch.crypto.dea_kw = 1;
383a374e892STony Krowiak 		break;
384a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
385a374e892STony Krowiak 		kvm->arch.crypto.aes_kw = 0;
386a374e892STony Krowiak 		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
387a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
388a374e892STony Krowiak 		break;
389a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
390a374e892STony Krowiak 		kvm->arch.crypto.dea_kw = 0;
391a374e892STony Krowiak 		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
392a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
393a374e892STony Krowiak 		break;
394a374e892STony Krowiak 	default:
395a374e892STony Krowiak 		mutex_unlock(&kvm->lock);
396a374e892STony Krowiak 		return -ENXIO;
397a374e892STony Krowiak 	}
398a374e892STony Krowiak 
399a374e892STony Krowiak 	kvm_for_each_vcpu(i, vcpu, kvm) {
400a374e892STony Krowiak 		kvm_s390_vcpu_crypto_setup(vcpu);
401a374e892STony Krowiak 		exit_sie(vcpu);
402a374e892STony Krowiak 	}
403a374e892STony Krowiak 	mutex_unlock(&kvm->lock);
404a374e892STony Krowiak 	return 0;
405a374e892STony Krowiak }
406a374e892STony Krowiak 
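/*
 * Minimal userspace sketch of toggling key wrapping through the crypto
 * attributes handled above; the group/attr constants are the ones from the
 * switch statement, no payload is needed, and vm_fd is a hypothetical VM
 * file descriptor.
 */
static int example_enable_aes_key_wrapping(int vm_fd)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_CRYPTO,
		.attr  = KVM_S390_VM_CRYPTO_ENABLE_AES_KW,
	};

	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}
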
40772f25020SJason J. Herne static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
40872f25020SJason J. Herne {
40972f25020SJason J. Herne 	u8 gtod_high;
41072f25020SJason J. Herne 
41172f25020SJason J. Herne 	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
41272f25020SJason J. Herne 					   sizeof(gtod_high)))
41372f25020SJason J. Herne 		return -EFAULT;
41472f25020SJason J. Herne 
41572f25020SJason J. Herne 	if (gtod_high != 0)
41672f25020SJason J. Herne 		return -EINVAL;
41772f25020SJason J. Herne 
41872f25020SJason J. Herne 	return 0;
41972f25020SJason J. Herne }
42072f25020SJason J. Herne 
42172f25020SJason J. Herne static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
42272f25020SJason J. Herne {
42372f25020SJason J. Herne 	struct kvm_vcpu *cur_vcpu;
42472f25020SJason J. Herne 	unsigned int vcpu_idx;
42572f25020SJason J. Herne 	u64 host_tod, gtod;
42672f25020SJason J. Herne 	int r;
42772f25020SJason J. Herne 
42872f25020SJason J. Herne 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
42972f25020SJason J. Herne 		return -EFAULT;
43072f25020SJason J. Herne 
43172f25020SJason J. Herne 	r = store_tod_clock(&host_tod);
43272f25020SJason J. Herne 	if (r)
43372f25020SJason J. Herne 		return r;
43472f25020SJason J. Herne 
43572f25020SJason J. Herne 	mutex_lock(&kvm->lock);
43672f25020SJason J. Herne 	kvm->arch.epoch = gtod - host_tod;
43772f25020SJason J. Herne 	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
43872f25020SJason J. Herne 		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
43972f25020SJason J. Herne 		exit_sie(cur_vcpu);
44072f25020SJason J. Herne 	}
44172f25020SJason J. Herne 	mutex_unlock(&kvm->lock);
44272f25020SJason J. Herne 	return 0;
44372f25020SJason J. Herne }
44472f25020SJason J. Herne 
44572f25020SJason J. Herne static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
44672f25020SJason J. Herne {
44772f25020SJason J. Herne 	int ret;
44872f25020SJason J. Herne 
44972f25020SJason J. Herne 	if (attr->flags)
45072f25020SJason J. Herne 		return -EINVAL;
45172f25020SJason J. Herne 
45272f25020SJason J. Herne 	switch (attr->attr) {
45372f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
45472f25020SJason J. Herne 		ret = kvm_s390_set_tod_high(kvm, attr);
45572f25020SJason J. Herne 		break;
45672f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
45772f25020SJason J. Herne 		ret = kvm_s390_set_tod_low(kvm, attr);
45872f25020SJason J. Herne 		break;
45972f25020SJason J. Herne 	default:
46072f25020SJason J. Herne 		ret = -ENXIO;
46172f25020SJason J. Herne 		break;
46272f25020SJason J. Herne 	}
46372f25020SJason J. Herne 	return ret;
46472f25020SJason J. Herne }
46572f25020SJason J. Herne 
46672f25020SJason J. Herne static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
46772f25020SJason J. Herne {
46872f25020SJason J. Herne 	u8 gtod_high = 0;
46972f25020SJason J. Herne 
47072f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod_high,
47172f25020SJason J. Herne 					 sizeof(gtod_high)))
47272f25020SJason J. Herne 		return -EFAULT;
47372f25020SJason J. Herne 
47472f25020SJason J. Herne 	return 0;
47572f25020SJason J. Herne }
47672f25020SJason J. Herne 
47772f25020SJason J. Herne static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
47872f25020SJason J. Herne {
47972f25020SJason J. Herne 	u64 host_tod, gtod;
48072f25020SJason J. Herne 	int r;
48172f25020SJason J. Herne 
48272f25020SJason J. Herne 	r = store_tod_clock(&host_tod);
48372f25020SJason J. Herne 	if (r)
48472f25020SJason J. Herne 		return r;
48572f25020SJason J. Herne 
48672f25020SJason J. Herne 	gtod = host_tod + kvm->arch.epoch;
48772f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
48872f25020SJason J. Herne 		return -EFAULT;
48972f25020SJason J. Herne 
49072f25020SJason J. Herne 	return 0;
49172f25020SJason J. Herne }
49272f25020SJason J. Herne 
49372f25020SJason J. Herne static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
49472f25020SJason J. Herne {
49572f25020SJason J. Herne 	int ret;
49672f25020SJason J. Herne 
49772f25020SJason J. Herne 	if (attr->flags)
49872f25020SJason J. Herne 		return -EINVAL;
49972f25020SJason J. Herne 
50072f25020SJason J. Herne 	switch (attr->attr) {
50172f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
50272f25020SJason J. Herne 		ret = kvm_s390_get_tod_high(kvm, attr);
50372f25020SJason J. Herne 		break;
50472f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
50572f25020SJason J. Herne 		ret = kvm_s390_get_tod_low(kvm, attr);
50672f25020SJason J. Herne 		break;
50772f25020SJason J. Herne 	default:
50872f25020SJason J. Herne 		ret = -ENXIO;
50972f25020SJason J. Herne 		break;
51072f25020SJason J. Herne 	}
51172f25020SJason J. Herne 	return ret;
51272f25020SJason J. Herne }
51372f25020SJason J. Herne 
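/*
 * Minimal userspace sketch of reading the guest TOD through the attributes
 * handled above, assuming struct kvm_device_attr from <linux/kvm.h>; vm_fd
 * is a hypothetical VM file descriptor, and the epoch difference is applied
 * by kvm_s390_get_tod_low() before the value is copied out.
 */
static int example_get_guest_tod(int vm_fd, __u64 *tod)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_TOD,
		.attr  = KVM_S390_VM_TOD_LOW,
		.addr  = (__u64)(unsigned long)tod,
	};

	return ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
}
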
514658b6edaSMichael Mueller static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
515658b6edaSMichael Mueller {
516658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_processor *proc;
517658b6edaSMichael Mueller 	int ret = 0;
518658b6edaSMichael Mueller 
519658b6edaSMichael Mueller 	mutex_lock(&kvm->lock);
520658b6edaSMichael Mueller 	if (atomic_read(&kvm->online_vcpus)) {
521658b6edaSMichael Mueller 		ret = -EBUSY;
522658b6edaSMichael Mueller 		goto out;
523658b6edaSMichael Mueller 	}
524658b6edaSMichael Mueller 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
525658b6edaSMichael Mueller 	if (!proc) {
526658b6edaSMichael Mueller 		ret = -ENOMEM;
527658b6edaSMichael Mueller 		goto out;
528658b6edaSMichael Mueller 	}
529658b6edaSMichael Mueller 	if (!copy_from_user(proc, (void __user *)attr->addr,
530658b6edaSMichael Mueller 			    sizeof(*proc))) {
531658b6edaSMichael Mueller 		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
532658b6edaSMichael Mueller 		       sizeof(struct cpuid));
533658b6edaSMichael Mueller 		kvm->arch.model.ibc = proc->ibc;
534981467c9SMichael Mueller 		memcpy(kvm->arch.model.fac->list, proc->fac_list,
535658b6edaSMichael Mueller 		       S390_ARCH_FAC_LIST_SIZE_BYTE);
536658b6edaSMichael Mueller 	} else
537658b6edaSMichael Mueller 		ret = -EFAULT;
538658b6edaSMichael Mueller 	kfree(proc);
539658b6edaSMichael Mueller out:
540658b6edaSMichael Mueller 	mutex_unlock(&kvm->lock);
541658b6edaSMichael Mueller 	return ret;
542658b6edaSMichael Mueller }
543658b6edaSMichael Mueller 
544658b6edaSMichael Mueller static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
545658b6edaSMichael Mueller {
546658b6edaSMichael Mueller 	int ret = -ENXIO;
547658b6edaSMichael Mueller 
548658b6edaSMichael Mueller 	switch (attr->attr) {
549658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
550658b6edaSMichael Mueller 		ret = kvm_s390_set_processor(kvm, attr);
551658b6edaSMichael Mueller 		break;
552658b6edaSMichael Mueller 	}
553658b6edaSMichael Mueller 	return ret;
554658b6edaSMichael Mueller }
555658b6edaSMichael Mueller 
556658b6edaSMichael Mueller static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
557658b6edaSMichael Mueller {
558658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_processor *proc;
559658b6edaSMichael Mueller 	int ret = 0;
560658b6edaSMichael Mueller 
561658b6edaSMichael Mueller 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
562658b6edaSMichael Mueller 	if (!proc) {
563658b6edaSMichael Mueller 		ret = -ENOMEM;
564658b6edaSMichael Mueller 		goto out;
565658b6edaSMichael Mueller 	}
566658b6edaSMichael Mueller 	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
567658b6edaSMichael Mueller 	proc->ibc = kvm->arch.model.ibc;
568981467c9SMichael Mueller 	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
569658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
570658b6edaSMichael Mueller 		ret = -EFAULT;
571658b6edaSMichael Mueller 	kfree(proc);
572658b6edaSMichael Mueller out:
573658b6edaSMichael Mueller 	return ret;
574658b6edaSMichael Mueller }
575658b6edaSMichael Mueller 
576658b6edaSMichael Mueller static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
577658b6edaSMichael Mueller {
578658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_machine *mach;
579658b6edaSMichael Mueller 	int ret = 0;
580658b6edaSMichael Mueller 
581658b6edaSMichael Mueller 	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
582658b6edaSMichael Mueller 	if (!mach) {
583658b6edaSMichael Mueller 		ret = -ENOMEM;
584658b6edaSMichael Mueller 		goto out;
585658b6edaSMichael Mueller 	}
586658b6edaSMichael Mueller 	get_cpu_id((struct cpuid *) &mach->cpuid);
587658b6edaSMichael Mueller 	mach->ibc = sclp_get_ibc();
588981467c9SMichael Mueller 	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
589981467c9SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
590658b6edaSMichael Mueller 	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
59194422ee8SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
592658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
593658b6edaSMichael Mueller 		ret = -EFAULT;
594658b6edaSMichael Mueller 	kfree(mach);
595658b6edaSMichael Mueller out:
596658b6edaSMichael Mueller 	return ret;
597658b6edaSMichael Mueller }
598658b6edaSMichael Mueller 
599658b6edaSMichael Mueller static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
600658b6edaSMichael Mueller {
601658b6edaSMichael Mueller 	int ret = -ENXIO;
602658b6edaSMichael Mueller 
603658b6edaSMichael Mueller 	switch (attr->attr) {
604658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
605658b6edaSMichael Mueller 		ret = kvm_s390_get_processor(kvm, attr);
606658b6edaSMichael Mueller 		break;
607658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MACHINE:
608658b6edaSMichael Mueller 		ret = kvm_s390_get_machine(kvm, attr);
609658b6edaSMichael Mueller 		break;
610658b6edaSMichael Mueller 	}
611658b6edaSMichael Mueller 	return ret;
612658b6edaSMichael Mueller }
613658b6edaSMichael Mueller 
614f2061656SDominik Dingel static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
615f2061656SDominik Dingel {
616f2061656SDominik Dingel 	int ret;
617f2061656SDominik Dingel 
618f2061656SDominik Dingel 	switch (attr->group) {
6194f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
6208c0a7ce6SDominik Dingel 		ret = kvm_s390_set_mem_control(kvm, attr);
6214f718eabSDominik Dingel 		break;
62272f25020SJason J. Herne 	case KVM_S390_VM_TOD:
62372f25020SJason J. Herne 		ret = kvm_s390_set_tod(kvm, attr);
62472f25020SJason J. Herne 		break;
625658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
626658b6edaSMichael Mueller 		ret = kvm_s390_set_cpu_model(kvm, attr);
627658b6edaSMichael Mueller 		break;
628a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO:
629a374e892STony Krowiak 		ret = kvm_s390_vm_set_crypto(kvm, attr);
630a374e892STony Krowiak 		break;
631f2061656SDominik Dingel 	default:
632f2061656SDominik Dingel 		ret = -ENXIO;
633f2061656SDominik Dingel 		break;
634f2061656SDominik Dingel 	}
635f2061656SDominik Dingel 
636f2061656SDominik Dingel 	return ret;
637f2061656SDominik Dingel }
638f2061656SDominik Dingel 
639f2061656SDominik Dingel static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
640f2061656SDominik Dingel {
6418c0a7ce6SDominik Dingel 	int ret;
6428c0a7ce6SDominik Dingel 
6438c0a7ce6SDominik Dingel 	switch (attr->group) {
6448c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
6458c0a7ce6SDominik Dingel 		ret = kvm_s390_get_mem_control(kvm, attr);
6468c0a7ce6SDominik Dingel 		break;
64772f25020SJason J. Herne 	case KVM_S390_VM_TOD:
64872f25020SJason J. Herne 		ret = kvm_s390_get_tod(kvm, attr);
64972f25020SJason J. Herne 		break;
650658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
651658b6edaSMichael Mueller 		ret = kvm_s390_get_cpu_model(kvm, attr);
652658b6edaSMichael Mueller 		break;
6538c0a7ce6SDominik Dingel 	default:
6548c0a7ce6SDominik Dingel 		ret = -ENXIO;
6558c0a7ce6SDominik Dingel 		break;
6568c0a7ce6SDominik Dingel 	}
6578c0a7ce6SDominik Dingel 
6588c0a7ce6SDominik Dingel 	return ret;
659f2061656SDominik Dingel }
660f2061656SDominik Dingel 
661f2061656SDominik Dingel static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
662f2061656SDominik Dingel {
663f2061656SDominik Dingel 	int ret;
664f2061656SDominik Dingel 
665f2061656SDominik Dingel 	switch (attr->group) {
6664f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
6674f718eabSDominik Dingel 		switch (attr->attr) {
6684f718eabSDominik Dingel 		case KVM_S390_VM_MEM_ENABLE_CMMA:
6694f718eabSDominik Dingel 		case KVM_S390_VM_MEM_CLR_CMMA:
6708c0a7ce6SDominik Dingel 		case KVM_S390_VM_MEM_LIMIT_SIZE:
6714f718eabSDominik Dingel 			ret = 0;
6724f718eabSDominik Dingel 			break;
6734f718eabSDominik Dingel 		default:
6744f718eabSDominik Dingel 			ret = -ENXIO;
6754f718eabSDominik Dingel 			break;
6764f718eabSDominik Dingel 		}
6774f718eabSDominik Dingel 		break;
67872f25020SJason J. Herne 	case KVM_S390_VM_TOD:
67972f25020SJason J. Herne 		switch (attr->attr) {
68072f25020SJason J. Herne 		case KVM_S390_VM_TOD_LOW:
68172f25020SJason J. Herne 		case KVM_S390_VM_TOD_HIGH:
68272f25020SJason J. Herne 			ret = 0;
68372f25020SJason J. Herne 			break;
68472f25020SJason J. Herne 		default:
68572f25020SJason J. Herne 			ret = -ENXIO;
68672f25020SJason J. Herne 			break;
68772f25020SJason J. Herne 		}
68872f25020SJason J. Herne 		break;
689658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
690658b6edaSMichael Mueller 		switch (attr->attr) {
691658b6edaSMichael Mueller 		case KVM_S390_VM_CPU_PROCESSOR:
692658b6edaSMichael Mueller 		case KVM_S390_VM_CPU_MACHINE:
693658b6edaSMichael Mueller 			ret = 0;
694658b6edaSMichael Mueller 			break;
695658b6edaSMichael Mueller 		default:
696658b6edaSMichael Mueller 			ret = -ENXIO;
697658b6edaSMichael Mueller 			break;
698658b6edaSMichael Mueller 		}
699658b6edaSMichael Mueller 		break;
700a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO:
701a374e892STony Krowiak 		switch (attr->attr) {
702a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
703a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
704a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
705a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
706a374e892STony Krowiak 			ret = 0;
707a374e892STony Krowiak 			break;
708a374e892STony Krowiak 		default:
709a374e892STony Krowiak 			ret = -ENXIO;
710a374e892STony Krowiak 			break;
711a374e892STony Krowiak 		}
712a374e892STony Krowiak 		break;
713f2061656SDominik Dingel 	default:
714f2061656SDominik Dingel 		ret = -ENXIO;
715f2061656SDominik Dingel 		break;
716f2061656SDominik Dingel 	}
717f2061656SDominik Dingel 
718f2061656SDominik Dingel 	return ret;
719f2061656SDominik Dingel }
720f2061656SDominik Dingel 
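/*
 * Minimal userspace sketch of probing an attribute before using it, matching
 * the KVM_HAS_DEVICE_ATTR dispatch in kvm_arch_vm_ioctl() below; vm_fd is a
 * hypothetical VM file descriptor.
 */
static int example_vm_has_attr(int vm_fd, __u32 group, __u64 id)
{
	struct kvm_device_attr attr = { .group = group, .attr = id };

	/* 0 when the attribute exists, -1 with errno set to ENXIO when not */
	return ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr);
}
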
721b0c632dbSHeiko Carstens long kvm_arch_vm_ioctl(struct file *filp,
722b0c632dbSHeiko Carstens 		       unsigned int ioctl, unsigned long arg)
723b0c632dbSHeiko Carstens {
724b0c632dbSHeiko Carstens 	struct kvm *kvm = filp->private_data;
725b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
726f2061656SDominik Dingel 	struct kvm_device_attr attr;
727b0c632dbSHeiko Carstens 	int r;
728b0c632dbSHeiko Carstens 
729b0c632dbSHeiko Carstens 	switch (ioctl) {
730ba5c1e9bSCarsten Otte 	case KVM_S390_INTERRUPT: {
731ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
732ba5c1e9bSCarsten Otte 
733ba5c1e9bSCarsten Otte 		r = -EFAULT;
734ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
735ba5c1e9bSCarsten Otte 			break;
736ba5c1e9bSCarsten Otte 		r = kvm_s390_inject_vm(kvm, &s390int);
737ba5c1e9bSCarsten Otte 		break;
738ba5c1e9bSCarsten Otte 	}
739d938dc55SCornelia Huck 	case KVM_ENABLE_CAP: {
740d938dc55SCornelia Huck 		struct kvm_enable_cap cap;
741d938dc55SCornelia Huck 		r = -EFAULT;
742d938dc55SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
743d938dc55SCornelia Huck 			break;
744d938dc55SCornelia Huck 		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
745d938dc55SCornelia Huck 		break;
746d938dc55SCornelia Huck 	}
74784223598SCornelia Huck 	case KVM_CREATE_IRQCHIP: {
74884223598SCornelia Huck 		struct kvm_irq_routing_entry routing;
74984223598SCornelia Huck 
75084223598SCornelia Huck 		r = -EINVAL;
75184223598SCornelia Huck 		if (kvm->arch.use_irqchip) {
75284223598SCornelia Huck 			/* Set up dummy routing. */
75384223598SCornelia Huck 			memset(&routing, 0, sizeof(routing));
75484223598SCornelia Huck 			kvm_set_irq_routing(kvm, &routing, 0, 0);
75584223598SCornelia Huck 			r = 0;
75684223598SCornelia Huck 		}
75784223598SCornelia Huck 		break;
75884223598SCornelia Huck 	}
759f2061656SDominik Dingel 	case KVM_SET_DEVICE_ATTR: {
760f2061656SDominik Dingel 		r = -EFAULT;
761f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
762f2061656SDominik Dingel 			break;
763f2061656SDominik Dingel 		r = kvm_s390_vm_set_attr(kvm, &attr);
764f2061656SDominik Dingel 		break;
765f2061656SDominik Dingel 	}
766f2061656SDominik Dingel 	case KVM_GET_DEVICE_ATTR: {
767f2061656SDominik Dingel 		r = -EFAULT;
768f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
769f2061656SDominik Dingel 			break;
770f2061656SDominik Dingel 		r = kvm_s390_vm_get_attr(kvm, &attr);
771f2061656SDominik Dingel 		break;
772f2061656SDominik Dingel 	}
773f2061656SDominik Dingel 	case KVM_HAS_DEVICE_ATTR: {
774f2061656SDominik Dingel 		r = -EFAULT;
775f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
776f2061656SDominik Dingel 			break;
777f2061656SDominik Dingel 		r = kvm_s390_vm_has_attr(kvm, &attr);
778f2061656SDominik Dingel 		break;
779f2061656SDominik Dingel 	}
780b0c632dbSHeiko Carstens 	default:
781367e1319SAvi Kivity 		r = -ENOTTY;
782b0c632dbSHeiko Carstens 	}
783b0c632dbSHeiko Carstens 
784b0c632dbSHeiko Carstens 	return r;
785b0c632dbSHeiko Carstens }
786b0c632dbSHeiko Carstens 
78745c9b47cSTony Krowiak static int kvm_s390_query_ap_config(u8 *config)
78845c9b47cSTony Krowiak {
78945c9b47cSTony Krowiak 	u32 fcn_code = 0x04000000UL;
79086044c8cSChristian Borntraeger 	u32 cc = 0;
79145c9b47cSTony Krowiak 
79286044c8cSChristian Borntraeger 	memset(config, 0, 128);
79345c9b47cSTony Krowiak 	asm volatile(
79445c9b47cSTony Krowiak 		"lgr 0,%1\n"
79545c9b47cSTony Krowiak 		"lgr 2,%2\n"
79645c9b47cSTony Krowiak 		".long 0xb2af0000\n"		/* PQAP(QCI) */
79786044c8cSChristian Borntraeger 		"0: ipm %0\n"
79845c9b47cSTony Krowiak 		"srl %0,28\n"
79986044c8cSChristian Borntraeger 		"1:\n"
80086044c8cSChristian Borntraeger 		EX_TABLE(0b, 1b)
80186044c8cSChristian Borntraeger 		: "+r" (cc)
80245c9b47cSTony Krowiak 		: "r" (fcn_code), "r" (config)
80345c9b47cSTony Krowiak 		: "cc", "0", "2", "memory"
80445c9b47cSTony Krowiak 	);
80545c9b47cSTony Krowiak 
80645c9b47cSTony Krowiak 	return cc;
80745c9b47cSTony Krowiak }
80845c9b47cSTony Krowiak 
80945c9b47cSTony Krowiak static int kvm_s390_apxa_installed(void)
81045c9b47cSTony Krowiak {
81145c9b47cSTony Krowiak 	u8 config[128];
81245c9b47cSTony Krowiak 	int cc;
81345c9b47cSTony Krowiak 
81445c9b47cSTony Krowiak 	if (test_facility(2) && test_facility(12)) {
81545c9b47cSTony Krowiak 		cc = kvm_s390_query_ap_config(config);
81645c9b47cSTony Krowiak 
81745c9b47cSTony Krowiak 		if (cc)
81845c9b47cSTony Krowiak 			pr_err("PQAP(QCI) failed with cc=%d", cc);
81945c9b47cSTony Krowiak 		else
82045c9b47cSTony Krowiak 			return config[0] & 0x40;
82145c9b47cSTony Krowiak 	}
82245c9b47cSTony Krowiak 
82345c9b47cSTony Krowiak 	return 0;
82445c9b47cSTony Krowiak }
82545c9b47cSTony Krowiak 
82645c9b47cSTony Krowiak static void kvm_s390_set_crycb_format(struct kvm *kvm)
82745c9b47cSTony Krowiak {
82845c9b47cSTony Krowiak 	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
82945c9b47cSTony Krowiak 
83045c9b47cSTony Krowiak 	if (kvm_s390_apxa_installed())
83145c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
83245c9b47cSTony Krowiak 	else
83345c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
83445c9b47cSTony Krowiak }
83545c9b47cSTony Krowiak 
8369d8d5786SMichael Mueller static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
8379d8d5786SMichael Mueller {
8389d8d5786SMichael Mueller 	get_cpu_id(cpu_id);
8399d8d5786SMichael Mueller 	cpu_id->version = 0xff;
8409d8d5786SMichael Mueller }
8419d8d5786SMichael Mueller 
8425102ee87STony Krowiak static int kvm_s390_crypto_init(struct kvm *kvm)
8435102ee87STony Krowiak {
8449d8d5786SMichael Mueller 	if (!test_kvm_facility(kvm, 76))
8455102ee87STony Krowiak 		return 0;
8465102ee87STony Krowiak 
8475102ee87STony Krowiak 	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
8485102ee87STony Krowiak 					 GFP_KERNEL | GFP_DMA);
8495102ee87STony Krowiak 	if (!kvm->arch.crypto.crycb)
8505102ee87STony Krowiak 		return -ENOMEM;
8515102ee87STony Krowiak 
85245c9b47cSTony Krowiak 	kvm_s390_set_crycb_format(kvm);
8535102ee87STony Krowiak 
854ed6f76b4STony Krowiak 	/* Enable AES/DEA protected key functions by default */
855ed6f76b4STony Krowiak 	kvm->arch.crypto.aes_kw = 1;
856ed6f76b4STony Krowiak 	kvm->arch.crypto.dea_kw = 1;
857ed6f76b4STony Krowiak 	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
858ed6f76b4STony Krowiak 			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
859ed6f76b4STony Krowiak 	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
860ed6f76b4STony Krowiak 			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
861a374e892STony Krowiak 
8625102ee87STony Krowiak 	return 0;
8635102ee87STony Krowiak }
8645102ee87STony Krowiak 
865e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
866b0c632dbSHeiko Carstens {
8679d8d5786SMichael Mueller 	int i, rc;
868b0c632dbSHeiko Carstens 	char debug_name[16];
869f6c137ffSChristian Borntraeger 	static unsigned long sca_offset;
870b0c632dbSHeiko Carstens 
871e08b9637SCarsten Otte 	rc = -EINVAL;
872e08b9637SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
873e08b9637SCarsten Otte 	if (type & ~KVM_VM_S390_UCONTROL)
874e08b9637SCarsten Otte 		goto out_err;
875e08b9637SCarsten Otte 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
876e08b9637SCarsten Otte 		goto out_err;
877e08b9637SCarsten Otte #else
878e08b9637SCarsten Otte 	if (type)
879e08b9637SCarsten Otte 		goto out_err;
880e08b9637SCarsten Otte #endif
881e08b9637SCarsten Otte 
882b0c632dbSHeiko Carstens 	rc = s390_enable_sie();
883b0c632dbSHeiko Carstens 	if (rc)
884d89f5effSJan Kiszka 		goto out_err;
885b0c632dbSHeiko Carstens 
886b290411aSCarsten Otte 	rc = -ENOMEM;
887b290411aSCarsten Otte 
888b0c632dbSHeiko Carstens 	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
889b0c632dbSHeiko Carstens 	if (!kvm->arch.sca)
890d89f5effSJan Kiszka 		goto out_err;
891f6c137ffSChristian Borntraeger 	spin_lock(&kvm_lock);
892f6c137ffSChristian Borntraeger 	sca_offset = (sca_offset + 16) & 0x7f0;
893f6c137ffSChristian Borntraeger 	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
894f6c137ffSChristian Borntraeger 	spin_unlock(&kvm_lock);
895b0c632dbSHeiko Carstens 
896b0c632dbSHeiko Carstens 	sprintf(debug_name, "kvm-%u", current->pid);
897b0c632dbSHeiko Carstens 
898b0c632dbSHeiko Carstens 	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
899b0c632dbSHeiko Carstens 	if (!kvm->arch.dbf)
900b0c632dbSHeiko Carstens 		goto out_nodbf;
901b0c632dbSHeiko Carstens 
9029d8d5786SMichael Mueller 	/*
9039d8d5786SMichael Mueller 	 * The architectural maximum number of facility bits is 16 kbit, which
9049d8d5786SMichael Mueller 	 * takes 2 kbyte of storage. Thus we need a full page to hold both the
905981467c9SMichael Mueller 	 * guest facility list (arch.model.fac->list) and the facility mask
906981467c9SMichael Mueller 	 * (arch.model.fac->mask). The page address must fit into 31 bits and
9079d8d5786SMichael Mueller 	 * be word aligned.
9089d8d5786SMichael Mueller 	 */
9099d8d5786SMichael Mueller 	kvm->arch.model.fac =
910981467c9SMichael Mueller 		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
9119d8d5786SMichael Mueller 	if (!kvm->arch.model.fac)
9129d8d5786SMichael Mueller 		goto out_nofac;
9139d8d5786SMichael Mueller 
914fb5bf93fSMichael Mueller 	/* Populate the facility mask initially. */
915981467c9SMichael Mueller 	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
91694422ee8SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
9179d8d5786SMichael Mueller 	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
9189d8d5786SMichael Mueller 		if (i < kvm_s390_fac_list_mask_size())
919981467c9SMichael Mueller 			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
9209d8d5786SMichael Mueller 		else
921981467c9SMichael Mueller 			kvm->arch.model.fac->mask[i] = 0UL;
9229d8d5786SMichael Mueller 	}
9239d8d5786SMichael Mueller 
924981467c9SMichael Mueller 	/* Populate the facility list initially. */
925981467c9SMichael Mueller 	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
926981467c9SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
927981467c9SMichael Mueller 
9289d8d5786SMichael Mueller 	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
929658b6edaSMichael Mueller 	kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;
9309d8d5786SMichael Mueller 
9315102ee87STony Krowiak 	if (kvm_s390_crypto_init(kvm) < 0)
9325102ee87STony Krowiak 		goto out_crypto;
9335102ee87STony Krowiak 
934ba5c1e9bSCarsten Otte 	spin_lock_init(&kvm->arch.float_int.lock);
935ba5c1e9bSCarsten Otte 	INIT_LIST_HEAD(&kvm->arch.float_int.list);
9368a242234SHeiko Carstens 	init_waitqueue_head(&kvm->arch.ipte_wq);
937a6b7e459SThomas Huth 	mutex_init(&kvm->arch.ipte_mutex);
938ba5c1e9bSCarsten Otte 
939b0c632dbSHeiko Carstens 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
940b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "%s", "vm created");
941b0c632dbSHeiko Carstens 
942e08b9637SCarsten Otte 	if (type & KVM_VM_S390_UCONTROL) {
943e08b9637SCarsten Otte 		kvm->arch.gmap = NULL;
944e08b9637SCarsten Otte 	} else {
9450349985aSChristian Borntraeger 		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
946598841caSCarsten Otte 		if (!kvm->arch.gmap)
947598841caSCarsten Otte 			goto out_nogmap;
9482c70fe44SChristian Borntraeger 		kvm->arch.gmap->private = kvm;
94924eb3a82SDominik Dingel 		kvm->arch.gmap->pfault_enabled = 0;
950e08b9637SCarsten Otte 	}
951fa6b7fe9SCornelia Huck 
952fa6b7fe9SCornelia Huck 	kvm->arch.css_support = 0;
95384223598SCornelia Huck 	kvm->arch.use_irqchip = 0;
95468c55750SEric Farman 	kvm->arch.use_vectors = 0;
95572f25020SJason J. Herne 	kvm->arch.epoch = 0;
956fa6b7fe9SCornelia Huck 
9578ad35755SDavid Hildenbrand 	spin_lock_init(&kvm->arch.start_stop_lock);
9588ad35755SDavid Hildenbrand 
959d89f5effSJan Kiszka 	return 0;
960598841caSCarsten Otte out_nogmap:
9615102ee87STony Krowiak 	kfree(kvm->arch.crypto.crycb);
9625102ee87STony Krowiak out_crypto:
9639d8d5786SMichael Mueller 	free_page((unsigned long)kvm->arch.model.fac);
9649d8d5786SMichael Mueller out_nofac:
965598841caSCarsten Otte 	debug_unregister(kvm->arch.dbf);
966b0c632dbSHeiko Carstens out_nodbf:
967b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
968d89f5effSJan Kiszka out_err:
969d89f5effSJan Kiszka 	return rc;
970b0c632dbSHeiko Carstens }
971b0c632dbSHeiko Carstens 
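/*
 * Minimal userspace sketch of the creation path that ends up in
 * kvm_arch_init_vm() above, assuming <fcntl.h>, <sys/ioctl.h> and
 * <linux/kvm.h>; error handling is omitted.
 */
static int example_create_vm(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);

	/* type 0 creates a regular guest; KVM_VM_S390_UCONTROL needs CAP_SYS_ADMIN */
	return ioctl(kvm_fd, KVM_CREATE_VM, 0UL);
}
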
972d329c035SChristian Borntraeger void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
973d329c035SChristian Borntraeger {
974d329c035SChristian Borntraeger 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
975ade38c31SCornelia Huck 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
97667335e63SChristian Borntraeger 	kvm_s390_clear_local_irqs(vcpu);
9773c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
97858f9460bSCarsten Otte 	if (!kvm_is_ucontrol(vcpu->kvm)) {
97958f9460bSCarsten Otte 		clear_bit(63 - vcpu->vcpu_id,
98058f9460bSCarsten Otte 			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
981abf4a71eSCarsten Otte 		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
982abf4a71eSCarsten Otte 		    (__u64) vcpu->arch.sie_block)
983abf4a71eSCarsten Otte 			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
98458f9460bSCarsten Otte 	}
985abf4a71eSCarsten Otte 	smp_mb();
98627e0393fSCarsten Otte 
98727e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm))
98827e0393fSCarsten Otte 		gmap_free(vcpu->arch.gmap);
98927e0393fSCarsten Otte 
990b31605c1SDominik Dingel 	if (kvm_s390_cmma_enabled(vcpu->kvm))
991b31605c1SDominik Dingel 		kvm_s390_vcpu_unsetup_cmma(vcpu);
992d329c035SChristian Borntraeger 	free_page((unsigned long)(vcpu->arch.sie_block));
993b31288faSKonstantin Weitz 
9946692cef3SChristian Borntraeger 	kvm_vcpu_uninit(vcpu);
995b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
996d329c035SChristian Borntraeger }
997d329c035SChristian Borntraeger 
998d329c035SChristian Borntraeger static void kvm_free_vcpus(struct kvm *kvm)
999d329c035SChristian Borntraeger {
1000d329c035SChristian Borntraeger 	unsigned int i;
1001988a2caeSGleb Natapov 	struct kvm_vcpu *vcpu;
1002d329c035SChristian Borntraeger 
1003988a2caeSGleb Natapov 	kvm_for_each_vcpu(i, vcpu, kvm)
1004988a2caeSGleb Natapov 		kvm_arch_vcpu_destroy(vcpu);
1005988a2caeSGleb Natapov 
1006988a2caeSGleb Natapov 	mutex_lock(&kvm->lock);
1007988a2caeSGleb Natapov 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
1008d329c035SChristian Borntraeger 		kvm->vcpus[i] = NULL;
1009988a2caeSGleb Natapov 
1010988a2caeSGleb Natapov 	atomic_set(&kvm->online_vcpus, 0);
1011988a2caeSGleb Natapov 	mutex_unlock(&kvm->lock);
1012d329c035SChristian Borntraeger }
1013d329c035SChristian Borntraeger 
1014b0c632dbSHeiko Carstens void kvm_arch_destroy_vm(struct kvm *kvm)
1015b0c632dbSHeiko Carstens {
1016d329c035SChristian Borntraeger 	kvm_free_vcpus(kvm);
10179d8d5786SMichael Mueller 	free_page((unsigned long)kvm->arch.model.fac);
1018b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
1019d329c035SChristian Borntraeger 	debug_unregister(kvm->arch.dbf);
10205102ee87STony Krowiak 	kfree(kvm->arch.crypto.crycb);
102127e0393fSCarsten Otte 	if (!kvm_is_ucontrol(kvm))
1022598841caSCarsten Otte 		gmap_free(kvm->arch.gmap);
1023841b91c5SCornelia Huck 	kvm_s390_destroy_adapters(kvm);
102467335e63SChristian Borntraeger 	kvm_s390_clear_float_irqs(kvm);
1025b0c632dbSHeiko Carstens }
1026b0c632dbSHeiko Carstens 
1027b0c632dbSHeiko Carstens /* Section: vcpu related */
1028dafd032aSDominik Dingel static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1029b0c632dbSHeiko Carstens {
1030c6c956b8SMartin Schwidefsky 	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
103127e0393fSCarsten Otte 	if (!vcpu->arch.gmap)
103227e0393fSCarsten Otte 		return -ENOMEM;
10332c70fe44SChristian Borntraeger 	vcpu->arch.gmap->private = vcpu->kvm;
1034dafd032aSDominik Dingel 
103527e0393fSCarsten Otte 	return 0;
103627e0393fSCarsten Otte }
103727e0393fSCarsten Otte 
1038dafd032aSDominik Dingel int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1039dafd032aSDominik Dingel {
1040dafd032aSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1041dafd032aSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
104259674c1aSChristian Borntraeger 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
104359674c1aSChristian Borntraeger 				    KVM_SYNC_GPRS |
10449eed0735SChristian Borntraeger 				    KVM_SYNC_ACRS |
1045b028ee3eSDavid Hildenbrand 				    KVM_SYNC_CRS |
1046b028ee3eSDavid Hildenbrand 				    KVM_SYNC_ARCH0 |
1047b028ee3eSDavid Hildenbrand 				    KVM_SYNC_PFAULT;
104868c55750SEric Farman 	if (test_kvm_facility(vcpu->kvm, 129))
104968c55750SEric Farman 		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
1050dafd032aSDominik Dingel 
1051dafd032aSDominik Dingel 	if (kvm_is_ucontrol(vcpu->kvm))
1052dafd032aSDominik Dingel 		return __kvm_ucontrol_vcpu_init(vcpu);
1053dafd032aSDominik Dingel 
1054b0c632dbSHeiko Carstens 	return 0;
1055b0c632dbSHeiko Carstens }
1056b0c632dbSHeiko Carstens 
1057b0c632dbSHeiko Carstens void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1058b0c632dbSHeiko Carstens {
10594725c860SMartin Schwidefsky 	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
106068c55750SEric Farman 	if (vcpu->kvm->arch.use_vectors)
106168c55750SEric Farman 		save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
106268c55750SEric Farman 	else
10634725c860SMartin Schwidefsky 		save_fp_regs(vcpu->arch.host_fpregs.fprs);
1064b0c632dbSHeiko Carstens 	save_access_regs(vcpu->arch.host_acrs);
106568c55750SEric Farman 	if (vcpu->kvm->arch.use_vectors) {
106668c55750SEric Farman 		restore_fp_ctl(&vcpu->run->s.regs.fpc);
106768c55750SEric Farman 		restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
106868c55750SEric Farman 	} else {
10694725c860SMartin Schwidefsky 		restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
10704725c860SMartin Schwidefsky 		restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
107168c55750SEric Farman 	}
107259674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
1073480e5926SChristian Borntraeger 	gmap_enable(vcpu->arch.gmap);
10749e6dabefSCornelia Huck 	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
1075b0c632dbSHeiko Carstens }
1076b0c632dbSHeiko Carstens 
1077b0c632dbSHeiko Carstens void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1078b0c632dbSHeiko Carstens {
10799e6dabefSCornelia Huck 	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
1080480e5926SChristian Borntraeger 	gmap_disable(vcpu->arch.gmap);
108168c55750SEric Farman 	if (vcpu->kvm->arch.use_vectors) {
108268c55750SEric Farman 		save_fp_ctl(&vcpu->run->s.regs.fpc);
108368c55750SEric Farman 		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
108468c55750SEric Farman 	} else {
10854725c860SMartin Schwidefsky 		save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
10864725c860SMartin Schwidefsky 		save_fp_regs(vcpu->arch.guest_fpregs.fprs);
108768c55750SEric Farman 	}
108859674c1aSChristian Borntraeger 	save_access_regs(vcpu->run->s.regs.acrs);
10894725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
109068c55750SEric Farman 	if (vcpu->kvm->arch.use_vectors)
109168c55750SEric Farman 		restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
109268c55750SEric Farman 	else
10934725c860SMartin Schwidefsky 		restore_fp_regs(vcpu->arch.host_fpregs.fprs);
1094b0c632dbSHeiko Carstens 	restore_access_regs(vcpu->arch.host_acrs);
1095b0c632dbSHeiko Carstens }
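/*
 * load/put implement lazy register switching: on load the host FP (or
 * vector) and access registers are saved and the guest copies are put
 * into the hardware; on put the guest values are written back (to
 * vcpu->run->s.regs when vectors are in use, to vcpu->arch.guest_fpregs
 * otherwise) and the host state is restored.
 */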
1096b0c632dbSHeiko Carstens 
1097b0c632dbSHeiko Carstens static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
1098b0c632dbSHeiko Carstens {
1099b0c632dbSHeiko Carstens 	/* this equals initial cpu reset in pop, but we don't switch to ESA */
1100b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.mask = 0UL;
1101b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.addr = 0UL;
11028d26cf7bSChristian Borntraeger 	kvm_s390_set_prefix(vcpu, 0);
1103b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->cputm     = 0UL;
1104b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->ckc       = 0UL;
1105b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->todpr     = 0;
1106b0c632dbSHeiko Carstens 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
1107b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
1108b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
1109b0c632dbSHeiko Carstens 	vcpu->arch.guest_fpregs.fpc = 0;
1110b0c632dbSHeiko Carstens 	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
1111b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gbea = 1;
1112672550fbSChristian Borntraeger 	vcpu->arch.sie_block->pp = 0;
11133c038e6bSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
11143c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
11156352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
11166852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
11172ed10cc1SJens Freimann 	kvm_s390_clear_local_irqs(vcpu);
1118b0c632dbSHeiko Carstens }
1119b0c632dbSHeiko Carstens 
112031928aa5SDominik Dingel void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
112142897d86SMarcelo Tosatti {
112272f25020SJason J. Herne 	mutex_lock(&vcpu->kvm->lock);
112372f25020SJason J. Herne 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
112472f25020SJason J. Herne 	mutex_unlock(&vcpu->kvm->lock);
1125dafd032aSDominik Dingel 	if (!kvm_is_ucontrol(vcpu->kvm))
1126dafd032aSDominik Dingel 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
112742897d86SMarcelo Tosatti }
112842897d86SMarcelo Tosatti 
11295102ee87STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
11305102ee87STony Krowiak {
11319d8d5786SMichael Mueller 	if (!test_kvm_facility(vcpu->kvm, 76))
11325102ee87STony Krowiak 		return;
11335102ee87STony Krowiak 
1134a374e892STony Krowiak 	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1135a374e892STony Krowiak 
1136a374e892STony Krowiak 	if (vcpu->kvm->arch.crypto.aes_kw)
1137a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1138a374e892STony Krowiak 	if (vcpu->kvm->arch.crypto.dea_kw)
1139a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1140a374e892STony Krowiak 
11415102ee87STony Krowiak 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
11425102ee87STony Krowiak }
11435102ee87STony Krowiak 
1144b31605c1SDominik Dingel void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1145b31605c1SDominik Dingel {
1146b31605c1SDominik Dingel 	free_page(vcpu->arch.sie_block->cbrlo);
1147b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = 0;
1148b31605c1SDominik Dingel }
1149b31605c1SDominik Dingel 
1150b31605c1SDominik Dingel int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1151b31605c1SDominik Dingel {
1152b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1153b31605c1SDominik Dingel 	if (!vcpu->arch.sie_block->cbrlo)
1154b31605c1SDominik Dingel 		return -ENOMEM;
1155b31605c1SDominik Dingel 
1156b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 |= 0x80;
1157b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 &= ~0x08;
1158b31605c1SDominik Dingel 	return 0;
1159b31605c1SDominik Dingel }
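/*
 * The cbrlo page backs collaborative-memory-management (CMMA) handling
 * for this VCPU; the ecb2 manipulation above (set 0x80, clear 0x08)
 * switches the SIE control block into that mode. This summary is a
 * reading of the setup/unsetup pair above, not an architectural
 * definition of the individual bits.
 */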
1160b31605c1SDominik Dingel 
116191520f1aSMichael Mueller static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
116291520f1aSMichael Mueller {
116391520f1aSMichael Mueller 	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
116491520f1aSMichael Mueller 
116591520f1aSMichael Mueller 	vcpu->arch.cpu_id = model->cpu_id;
116691520f1aSMichael Mueller 	vcpu->arch.sie_block->ibc = model->ibc;
116791520f1aSMichael Mueller 	vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
116891520f1aSMichael Mueller }
116991520f1aSMichael Mueller 
1170b0c632dbSHeiko Carstens int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1171b0c632dbSHeiko Carstens {
1172b31605c1SDominik Dingel 	int rc = 0;
1173b31288faSKonstantin Weitz 
11749e6dabefSCornelia Huck 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
11759e6dabefSCornelia Huck 						    CPUSTAT_SM |
117669d0d3a3SChristian Borntraeger 						    CPUSTAT_STOPPED |
117769d0d3a3SChristian Borntraeger 						    CPUSTAT_GED);
117891520f1aSMichael Mueller 	kvm_s390_vcpu_setup_model(vcpu);
117991520f1aSMichael Mueller 
1180fc34531dSChristian Borntraeger 	vcpu->arch.sie_block->ecb   = 6;
11819d8d5786SMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
11827feb6bb8SMichael Mueller 		vcpu->arch.sie_block->ecb |= 0x10;
11837feb6bb8SMichael Mueller 
118469d0d3a3SChristian Borntraeger 	vcpu->arch.sie_block->ecb2  = 8;
1185ea5f4969SDavid Hildenbrand 	vcpu->arch.sie_block->eca   = 0xC1002000U;
1186217a4406SHeiko Carstens 	if (sclp_has_siif())
1187217a4406SHeiko Carstens 		vcpu->arch.sie_block->eca |= 1;
1188ea5f4969SDavid Hildenbrand 	if (sclp_has_sigpif())
1189ea5f4969SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= 0x10000000U;
1190*13211ea7SEric Farman 	if (vcpu->kvm->arch.use_vectors) {
1191*13211ea7SEric Farman 		vcpu->arch.sie_block->eca |= 0x00020000;
1192*13211ea7SEric Farman 		vcpu->arch.sie_block->ecd |= 0x20000000;
1193*13211ea7SEric Farman 	}
1194492d8642SThomas Huth 	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
11955a5e6536SMatthew Rosato 
1196b31605c1SDominik Dingel 	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
1197b31605c1SDominik Dingel 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
1198b31605c1SDominik Dingel 		if (rc)
1199b31605c1SDominik Dingel 			return rc;
1200b31288faSKonstantin Weitz 	}
12010ac96cafSDavid Hildenbrand 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1202ca872302SChristian Borntraeger 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
12039d8d5786SMichael Mueller 
12045102ee87STony Krowiak 	kvm_s390_vcpu_crypto_setup(vcpu);
12055102ee87STony Krowiak 
1206b31605c1SDominik Dingel 	return rc;
1207b0c632dbSHeiko Carstens }
1208b0c632dbSHeiko Carstens 
1209b0c632dbSHeiko Carstens struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1210b0c632dbSHeiko Carstens 				      unsigned int id)
1211b0c632dbSHeiko Carstens {
12124d47555aSCarsten Otte 	struct kvm_vcpu *vcpu;
12137feb6bb8SMichael Mueller 	struct sie_page *sie_page;
12144d47555aSCarsten Otte 	int rc = -EINVAL;
1215b0c632dbSHeiko Carstens 
12164d47555aSCarsten Otte 	if (id >= KVM_MAX_VCPUS)
12174d47555aSCarsten Otte 		goto out;
12184d47555aSCarsten Otte 
12194d47555aSCarsten Otte 	rc = -ENOMEM;
12204d47555aSCarsten Otte 
1221b110feafSMichael Mueller 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
1222b0c632dbSHeiko Carstens 	if (!vcpu)
12234d47555aSCarsten Otte 		goto out;
1224b0c632dbSHeiko Carstens 
12257feb6bb8SMichael Mueller 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
12267feb6bb8SMichael Mueller 	if (!sie_page)
1227b0c632dbSHeiko Carstens 		goto out_free_cpu;
1228b0c632dbSHeiko Carstens 
12297feb6bb8SMichael Mueller 	vcpu->arch.sie_block = &sie_page->sie_block;
12307feb6bb8SMichael Mueller 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
123168c55750SEric Farman 	vcpu->arch.host_vregs = &sie_page->vregs;
12327feb6bb8SMichael Mueller 
1233b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icpua = id;
123458f9460bSCarsten Otte 	if (!kvm_is_ucontrol(kvm)) {
123558f9460bSCarsten Otte 		if (!kvm->arch.sca) {
123658f9460bSCarsten Otte 			WARN_ON_ONCE(1);
123758f9460bSCarsten Otte 			goto out_free_cpu;
123858f9460bSCarsten Otte 		}
1239abf4a71eSCarsten Otte 		if (!kvm->arch.sca->cpu[id].sda)
124058f9460bSCarsten Otte 			kvm->arch.sca->cpu[id].sda =
124158f9460bSCarsten Otte 				(__u64) vcpu->arch.sie_block;
124258f9460bSCarsten Otte 		vcpu->arch.sie_block->scaoh =
124358f9460bSCarsten Otte 			(__u32)(((__u64)kvm->arch.sca) >> 32);
1244b0c632dbSHeiko Carstens 		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
1245fc34531dSChristian Borntraeger 		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
124658f9460bSCarsten Otte 	}
1247b0c632dbSHeiko Carstens 
1248ba5c1e9bSCarsten Otte 	spin_lock_init(&vcpu->arch.local_int.lock);
1249ba5c1e9bSCarsten Otte 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
1250d0321a24SChristian Borntraeger 	vcpu->arch.local_int.wq = &vcpu->wq;
12515288fbf0SChristian Borntraeger 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
1252ba5c1e9bSCarsten Otte 
1253b0c632dbSHeiko Carstens 	rc = kvm_vcpu_init(vcpu, kvm, id);
1254b0c632dbSHeiko Carstens 	if (rc)
12557b06bf2fSWei Yongjun 		goto out_free_sie_block;
1256b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
1257b0c632dbSHeiko Carstens 		 vcpu->arch.sie_block);
1258ade38c31SCornelia Huck 	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
1259b0c632dbSHeiko Carstens 
1260b0c632dbSHeiko Carstens 	return vcpu;
12617b06bf2fSWei Yongjun out_free_sie_block:
12627b06bf2fSWei Yongjun 	free_page((unsigned long)(vcpu->arch.sie_block));
1263b0c632dbSHeiko Carstens out_free_cpu:
1264b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
12654d47555aSCarsten Otte out:
1266b0c632dbSHeiko Carstens 	return ERR_PTR(rc);
1267b0c632dbSHeiko Carstens }
1268b0c632dbSHeiko Carstens 
1269b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1270b0c632dbSHeiko Carstens {
12719a022067SDavid Hildenbrand 	return kvm_s390_vcpu_has_irq(vcpu, 0);
1272b0c632dbSHeiko Carstens }
1273b0c632dbSHeiko Carstens 
127449b99e1eSChristian Borntraeger void s390_vcpu_block(struct kvm_vcpu *vcpu)
127549b99e1eSChristian Borntraeger {
127649b99e1eSChristian Borntraeger 	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
127749b99e1eSChristian Borntraeger }
127849b99e1eSChristian Borntraeger 
127949b99e1eSChristian Borntraeger void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
128049b99e1eSChristian Borntraeger {
128149b99e1eSChristian Borntraeger 	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
128249b99e1eSChristian Borntraeger }
128349b99e1eSChristian Borntraeger 
128449b99e1eSChristian Borntraeger /*
128549b99e1eSChristian Borntraeger  * Kick a guest cpu out of SIE and wait until SIE is not running.
128649b99e1eSChristian Borntraeger  * If the CPU is not running (e.g. waiting as idle) the function will
128749b99e1eSChristian Borntraeger  * return immediately. */
128849b99e1eSChristian Borntraeger void exit_sie(struct kvm_vcpu *vcpu)
128949b99e1eSChristian Borntraeger {
129049b99e1eSChristian Borntraeger 	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
129149b99e1eSChristian Borntraeger 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
129249b99e1eSChristian Borntraeger 		cpu_relax();
129349b99e1eSChristian Borntraeger }
129449b99e1eSChristian Borntraeger 
129549b99e1eSChristian Borntraeger /* Kick a guest cpu out of SIE and prevent SIE-reentry */
129649b99e1eSChristian Borntraeger void exit_sie_sync(struct kvm_vcpu *vcpu)
129749b99e1eSChristian Borntraeger {
129849b99e1eSChristian Borntraeger 	s390_vcpu_block(vcpu);
129949b99e1eSChristian Borntraeger 	exit_sie(vcpu);
130049b99e1eSChristian Borntraeger }
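/*
 * Pattern used by the helpers above: s390_vcpu_block() sets
 * PROG_BLOCK_SIE so the interception handler will not reenter SIE,
 * exit_sie() raises CPUSTAT_STOP_INT and spins until PROG_IN_SIE is
 * clear, i.e. until the CPU has really left SIE. Callers such as
 * kvm_gmap_notifier() below rely on this to queue requests safely.
 */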
130149b99e1eSChristian Borntraeger 
13022c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
13032c70fe44SChristian Borntraeger {
13042c70fe44SChristian Borntraeger 	int i;
13052c70fe44SChristian Borntraeger 	struct kvm *kvm = gmap->private;
13062c70fe44SChristian Borntraeger 	struct kvm_vcpu *vcpu;
13072c70fe44SChristian Borntraeger 
13082c70fe44SChristian Borntraeger 	kvm_for_each_vcpu(i, vcpu, kvm) {
13092c70fe44SChristian Borntraeger 		/* match against both prefix pages */
1310fda902cbSMichael Mueller 		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
13112c70fe44SChristian Borntraeger 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
13122c70fe44SChristian Borntraeger 			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
13132c70fe44SChristian Borntraeger 			exit_sie_sync(vcpu);
13142c70fe44SChristian Borntraeger 		}
13152c70fe44SChristian Borntraeger 	}
13162c70fe44SChristian Borntraeger }
13172c70fe44SChristian Borntraeger 
1318b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1319b6d33834SChristoffer Dall {
1320b6d33834SChristoffer Dall 	/* kvm common code refers to this, but never calls it */
1321b6d33834SChristoffer Dall 	BUG();
1322b6d33834SChristoffer Dall 	return 0;
1323b6d33834SChristoffer Dall }
1324b6d33834SChristoffer Dall 
132514eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
132614eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
132714eebd91SCarsten Otte {
132814eebd91SCarsten Otte 	int r = -EINVAL;
132914eebd91SCarsten Otte 
133014eebd91SCarsten Otte 	switch (reg->id) {
133129b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
133229b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->todpr,
133329b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
133429b7c71bSCarsten Otte 		break;
133529b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
133629b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->epoch,
133729b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
133829b7c71bSCarsten Otte 		break;
133946a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
134046a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->cputm,
134146a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
134246a6dd1cSJason J. herne 		break;
134346a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
134446a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->ckc,
134546a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
134646a6dd1cSJason J. herne 		break;
1347536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
1348536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_token,
1349536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1350536336c2SDominik Dingel 		break;
1351536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
1352536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_compare,
1353536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1354536336c2SDominik Dingel 		break;
1355536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
1356536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_select,
1357536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1358536336c2SDominik Dingel 		break;
1359672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
1360672550fbSChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->pp,
1361672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
1362672550fbSChristian Borntraeger 		break;
1363afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
1364afa45ff5SChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->gbea,
1365afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
1366afa45ff5SChristian Borntraeger 		break;
136714eebd91SCarsten Otte 	default:
136814eebd91SCarsten Otte 		break;
136914eebd91SCarsten Otte 	}
137014eebd91SCarsten Otte 
137114eebd91SCarsten Otte 	return r;
137214eebd91SCarsten Otte }
137314eebd91SCarsten Otte 
137414eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
137514eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
137614eebd91SCarsten Otte {
137714eebd91SCarsten Otte 	int r = -EINVAL;
137814eebd91SCarsten Otte 
137914eebd91SCarsten Otte 	switch (reg->id) {
138029b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
138129b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->todpr,
138229b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
138329b7c71bSCarsten Otte 		break;
138429b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
138529b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->epoch,
138629b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
138729b7c71bSCarsten Otte 		break;
138846a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
138946a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->cputm,
139046a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
139146a6dd1cSJason J. herne 		break;
139246a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
139346a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->ckc,
139446a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
139546a6dd1cSJason J. herne 		break;
1396536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
1397536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_token,
1398536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
13999fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
14009fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
1401536336c2SDominik Dingel 		break;
1402536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
1403536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_compare,
1404536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1405536336c2SDominik Dingel 		break;
1406536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
1407536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_select,
1408536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1409536336c2SDominik Dingel 		break;
1410672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
1411672550fbSChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->pp,
1412672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
1413672550fbSChristian Borntraeger 		break;
1414afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
1415afa45ff5SChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->gbea,
1416afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
1417afa45ff5SChristian Borntraeger 		break;
141814eebd91SCarsten Otte 	default:
141914eebd91SCarsten Otte 		break;
142014eebd91SCarsten Otte 	}
142114eebd91SCarsten Otte 
142214eebd91SCarsten Otte 	return r;
142314eebd91SCarsten Otte }
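/*
 * Both handlers above implement the generic KVM_GET_ONE_REG /
 * KVM_SET_ONE_REG interface for the s390 register ids handled in the
 * switch statements. A minimal userspace sketch (illustrative only,
 * error handling omitted; "vcpu_fd" is assumed to be an open vcpu file
 * descriptor and "cpu_timer_value" a __u64 in userspace):
 *
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&cpu_timer_value,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	// read into cpu_timer_value
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	// write it back
 */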
1424b6d33834SChristoffer Dall 
1425b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
1426b0c632dbSHeiko Carstens {
1427b0c632dbSHeiko Carstens 	kvm_s390_vcpu_initial_reset(vcpu);
1428b0c632dbSHeiko Carstens 	return 0;
1429b0c632dbSHeiko Carstens }
1430b0c632dbSHeiko Carstens 
1431b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1432b0c632dbSHeiko Carstens {
14335a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
1434b0c632dbSHeiko Carstens 	return 0;
1435b0c632dbSHeiko Carstens }
1436b0c632dbSHeiko Carstens 
1437b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1438b0c632dbSHeiko Carstens {
14395a32c1afSChristian Borntraeger 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
1440b0c632dbSHeiko Carstens 	return 0;
1441b0c632dbSHeiko Carstens }
1442b0c632dbSHeiko Carstens 
1443b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1444b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
1445b0c632dbSHeiko Carstens {
144659674c1aSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
1447b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
144859674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
1449b0c632dbSHeiko Carstens 	return 0;
1450b0c632dbSHeiko Carstens }
1451b0c632dbSHeiko Carstens 
1452b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1453b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
1454b0c632dbSHeiko Carstens {
145559674c1aSChristian Borntraeger 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
1456b0c632dbSHeiko Carstens 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
1457b0c632dbSHeiko Carstens 	return 0;
1458b0c632dbSHeiko Carstens }
1459b0c632dbSHeiko Carstens 
1460b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1461b0c632dbSHeiko Carstens {
14624725c860SMartin Schwidefsky 	if (test_fp_ctl(fpu->fpc))
14634725c860SMartin Schwidefsky 		return -EINVAL;
1464b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
14654725c860SMartin Schwidefsky 	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
14664725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
14674725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
1468b0c632dbSHeiko Carstens 	return 0;
1469b0c632dbSHeiko Carstens }
1470b0c632dbSHeiko Carstens 
1471b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1472b0c632dbSHeiko Carstens {
1473b0c632dbSHeiko Carstens 	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
1474b0c632dbSHeiko Carstens 	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
1475b0c632dbSHeiko Carstens 	return 0;
1476b0c632dbSHeiko Carstens }
1477b0c632dbSHeiko Carstens 
1478b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1479b0c632dbSHeiko Carstens {
1480b0c632dbSHeiko Carstens 	int rc = 0;
1481b0c632dbSHeiko Carstens 
14827a42fdc2SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
1483b0c632dbSHeiko Carstens 		rc = -EBUSY;
1484d7b0b5ebSCarsten Otte 	else {
1485d7b0b5ebSCarsten Otte 		vcpu->run->psw_mask = psw.mask;
1486d7b0b5ebSCarsten Otte 		vcpu->run->psw_addr = psw.addr;
1487d7b0b5ebSCarsten Otte 	}
1488b0c632dbSHeiko Carstens 	return rc;
1489b0c632dbSHeiko Carstens }
1490b0c632dbSHeiko Carstens 
1491b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1492b0c632dbSHeiko Carstens 				  struct kvm_translation *tr)
1493b0c632dbSHeiko Carstens {
1494b0c632dbSHeiko Carstens 	return -EINVAL; /* not implemented yet */
1495b0c632dbSHeiko Carstens }
1496b0c632dbSHeiko Carstens 
149727291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
149827291e21SDavid Hildenbrand 			      KVM_GUESTDBG_USE_HW_BP | \
149927291e21SDavid Hildenbrand 			      KVM_GUESTDBG_ENABLE)
150027291e21SDavid Hildenbrand 
1501d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1502d0bfb940SJan Kiszka 					struct kvm_guest_debug *dbg)
1503b0c632dbSHeiko Carstens {
150427291e21SDavid Hildenbrand 	int rc = 0;
150527291e21SDavid Hildenbrand 
150627291e21SDavid Hildenbrand 	vcpu->guest_debug = 0;
150727291e21SDavid Hildenbrand 	kvm_s390_clear_bp_data(vcpu);
150827291e21SDavid Hildenbrand 
15092de3bfc2SDavid Hildenbrand 	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
151027291e21SDavid Hildenbrand 		return -EINVAL;
151127291e21SDavid Hildenbrand 
151227291e21SDavid Hildenbrand 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
151327291e21SDavid Hildenbrand 		vcpu->guest_debug = dbg->control;
151427291e21SDavid Hildenbrand 		/* enforce guest PER */
151527291e21SDavid Hildenbrand 		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
151627291e21SDavid Hildenbrand 
151727291e21SDavid Hildenbrand 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
151827291e21SDavid Hildenbrand 			rc = kvm_s390_import_bp_data(vcpu, dbg);
151927291e21SDavid Hildenbrand 	} else {
152027291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
152127291e21SDavid Hildenbrand 		vcpu->arch.guestdbg.last_bp = 0;
152227291e21SDavid Hildenbrand 	}
152327291e21SDavid Hildenbrand 
152427291e21SDavid Hildenbrand 	if (rc) {
152527291e21SDavid Hildenbrand 		vcpu->guest_debug = 0;
152627291e21SDavid Hildenbrand 		kvm_s390_clear_bp_data(vcpu);
152727291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
152827291e21SDavid Hildenbrand 	}
152927291e21SDavid Hildenbrand 
153027291e21SDavid Hildenbrand 	return rc;
1531b0c632dbSHeiko Carstens }
1532b0c632dbSHeiko Carstens 
153362d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
153462d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
153562d9f0dbSMarcelo Tosatti {
15366352e4d2SDavid Hildenbrand 	/* CHECK_STOP and LOAD are not supported yet */
15376352e4d2SDavid Hildenbrand 	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
15386352e4d2SDavid Hildenbrand 				       KVM_MP_STATE_OPERATING;
153962d9f0dbSMarcelo Tosatti }
154062d9f0dbSMarcelo Tosatti 
154162d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
154262d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
154362d9f0dbSMarcelo Tosatti {
15446352e4d2SDavid Hildenbrand 	int rc = 0;
15456352e4d2SDavid Hildenbrand 
15466352e4d2SDavid Hildenbrand 	/* user space knows about this interface - let it control the state */
15476352e4d2SDavid Hildenbrand 	vcpu->kvm->arch.user_cpu_state_ctrl = 1;
15486352e4d2SDavid Hildenbrand 
15496352e4d2SDavid Hildenbrand 	switch (mp_state->mp_state) {
15506352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_STOPPED:
15516352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
15526352e4d2SDavid Hildenbrand 		break;
15536352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_OPERATING:
15546352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
15556352e4d2SDavid Hildenbrand 		break;
15566352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_LOAD:
15576352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_CHECK_STOP:
15586352e4d2SDavid Hildenbrand 		/* fall through - CHECK_STOP and LOAD are not supported yet */
15596352e4d2SDavid Hildenbrand 	default:
15606352e4d2SDavid Hildenbrand 		rc = -ENXIO;
15616352e4d2SDavid Hildenbrand 	}
15626352e4d2SDavid Hildenbrand 
15636352e4d2SDavid Hildenbrand 	return rc;
156462d9f0dbSMarcelo Tosatti }
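/*
 * Note: the first KVM_SET_MP_STATE call sets user_cpu_state_ctrl for
 * the whole VM, so from then on kvm_arch_vcpu_ioctl_run() no longer
 * auto-starts a stopped VCPU and the initial reset no longer forces a
 * stop (see the kvm_s390_user_cpu_state_ctrl() checks above and below).
 */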
156562d9f0dbSMarcelo Tosatti 
1566b31605c1SDominik Dingel bool kvm_s390_cmma_enabled(struct kvm *kvm)
1567b31605c1SDominik Dingel {
1568b31605c1SDominik Dingel 	if (!MACHINE_IS_LPAR)
1569b31605c1SDominik Dingel 		return false;
1570b31605c1SDominik Dingel 	/* only enable for z10 and later */
1571b31605c1SDominik Dingel 	if (!MACHINE_HAS_EDAT1)
1572b31605c1SDominik Dingel 		return false;
1573b31605c1SDominik Dingel 	if (!kvm->arch.use_cmma)
1574b31605c1SDominik Dingel 		return false;
1575b31605c1SDominik Dingel 	return true;
1576b31605c1SDominik Dingel }
1577b31605c1SDominik Dingel 
15788ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu)
15798ad35755SDavid Hildenbrand {
15808ad35755SDavid Hildenbrand 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
15818ad35755SDavid Hildenbrand }
15828ad35755SDavid Hildenbrand 
15832c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
15842c70fe44SChristian Borntraeger {
15858ad35755SDavid Hildenbrand retry:
15868ad35755SDavid Hildenbrand 	s390_vcpu_unblock(vcpu);
15872c70fe44SChristian Borntraeger 	/*
15882c70fe44SChristian Borntraeger 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
15892c70fe44SChristian Borntraeger 	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
15902c70fe44SChristian Borntraeger 	 * This ensures that the ipte instruction for this request has
15912c70fe44SChristian Borntraeger 	 * already finished. We might race against a second unmapper that
15922c70fe44SChristian Borntraeger 	 * wants to set the blocking bit. Let's just retry the request loop.
15932c70fe44SChristian Borntraeger 	 */
15948ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
15952c70fe44SChristian Borntraeger 		int rc;
15962c70fe44SChristian Borntraeger 		rc = gmap_ipte_notify(vcpu->arch.gmap,
1597fda902cbSMichael Mueller 				      kvm_s390_get_prefix(vcpu),
15982c70fe44SChristian Borntraeger 				      PAGE_SIZE * 2);
15992c70fe44SChristian Borntraeger 		if (rc)
16002c70fe44SChristian Borntraeger 			return rc;
16018ad35755SDavid Hildenbrand 		goto retry;
16022c70fe44SChristian Borntraeger 	}
16038ad35755SDavid Hildenbrand 
1604d3d692c8SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
1605d3d692c8SDavid Hildenbrand 		vcpu->arch.sie_block->ihcpu = 0xffff;
1606d3d692c8SDavid Hildenbrand 		goto retry;
1607d3d692c8SDavid Hildenbrand 	}
1608d3d692c8SDavid Hildenbrand 
16098ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
16108ad35755SDavid Hildenbrand 		if (!ibs_enabled(vcpu)) {
16118ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
16128ad35755SDavid Hildenbrand 			atomic_set_mask(CPUSTAT_IBS,
16138ad35755SDavid Hildenbrand 					&vcpu->arch.sie_block->cpuflags);
16148ad35755SDavid Hildenbrand 		}
16158ad35755SDavid Hildenbrand 		goto retry;
16168ad35755SDavid Hildenbrand 	}
16178ad35755SDavid Hildenbrand 
16188ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
16198ad35755SDavid Hildenbrand 		if (ibs_enabled(vcpu)) {
16208ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
16218ad35755SDavid Hildenbrand 			atomic_clear_mask(CPUSTAT_IBS,
16228ad35755SDavid Hildenbrand 					  &vcpu->arch.sie_block->cpuflags);
16238ad35755SDavid Hildenbrand 		}
16248ad35755SDavid Hildenbrand 		goto retry;
16258ad35755SDavid Hildenbrand 	}
16268ad35755SDavid Hildenbrand 
16270759d068SDavid Hildenbrand 	/* nothing to do, just clear the request */
16280759d068SDavid Hildenbrand 	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
16290759d068SDavid Hildenbrand 
16302c70fe44SChristian Borntraeger 	return 0;
16312c70fe44SChristian Borntraeger }
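/*
 * Request handling summary: MMU_RELOAD re-arms the ipte notifier on the
 * two-page guest prefix area, TLB_FLUSH invalidates ihcpu so the next
 * SIE entry purges the guest TLB, and ENABLE_IBS/DISABLE_IBS toggle
 * CPUSTAT_IBS; every handled request restarts the loop via "goto retry"
 * so that newly queued requests are seen before guest entry.
 */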
16322c70fe44SChristian Borntraeger 
1633fa576c58SThomas Huth /**
1634fa576c58SThomas Huth  * kvm_arch_fault_in_page - fault-in guest page if necessary
1635fa576c58SThomas Huth  * @vcpu: The corresponding virtual cpu
1636fa576c58SThomas Huth  * @gpa: Guest physical address
1637fa576c58SThomas Huth  * @writable: Whether the page should be writable or not
1638fa576c58SThomas Huth  *
1639fa576c58SThomas Huth  * Make sure that a guest page has been faulted-in on the host.
1640fa576c58SThomas Huth  *
1641fa576c58SThomas Huth  * Return: Zero on success, negative error code otherwise.
1642fa576c58SThomas Huth  */
1643fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
164424eb3a82SDominik Dingel {
1645527e30b4SMartin Schwidefsky 	return gmap_fault(vcpu->arch.gmap, gpa,
1646527e30b4SMartin Schwidefsky 			  writable ? FAULT_FLAG_WRITE : 0);
164724eb3a82SDominik Dingel }
164824eb3a82SDominik Dingel 
16493c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
16503c038e6bSDominik Dingel 				      unsigned long token)
16513c038e6bSDominik Dingel {
16523c038e6bSDominik Dingel 	struct kvm_s390_interrupt inti;
1653383d0b05SJens Freimann 	struct kvm_s390_irq irq;
16543c038e6bSDominik Dingel 
16553c038e6bSDominik Dingel 	if (start_token) {
1656383d0b05SJens Freimann 		irq.u.ext.ext_params2 = token;
1657383d0b05SJens Freimann 		irq.type = KVM_S390_INT_PFAULT_INIT;
1658383d0b05SJens Freimann 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
16593c038e6bSDominik Dingel 	} else {
16603c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_DONE;
1661383d0b05SJens Freimann 		inti.parm64 = token;
16623c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
16633c038e6bSDominik Dingel 	}
16643c038e6bSDominik Dingel }
16653c038e6bSDominik Dingel 
16663c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
16673c038e6bSDominik Dingel 				     struct kvm_async_pf *work)
16683c038e6bSDominik Dingel {
16693c038e6bSDominik Dingel 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
16703c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
16713c038e6bSDominik Dingel }
16723c038e6bSDominik Dingel 
16733c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
16743c038e6bSDominik Dingel 				 struct kvm_async_pf *work)
16753c038e6bSDominik Dingel {
16763c038e6bSDominik Dingel 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
16773c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
16783c038e6bSDominik Dingel }
16793c038e6bSDominik Dingel 
16803c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
16813c038e6bSDominik Dingel 			       struct kvm_async_pf *work)
16823c038e6bSDominik Dingel {
16833c038e6bSDominik Dingel 	/* s390 will always inject the page directly */
16843c038e6bSDominik Dingel }
16853c038e6bSDominik Dingel 
16863c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
16873c038e6bSDominik Dingel {
16883c038e6bSDominik Dingel 	/*
16893c038e6bSDominik Dingel 	 * s390 will always inject the page directly,
16903c038e6bSDominik Dingel 	 * but we still want check_async_completion to clean up
16913c038e6bSDominik Dingel 	 */
16923c038e6bSDominik Dingel 	return true;
16933c038e6bSDominik Dingel }
16943c038e6bSDominik Dingel 
16953c038e6bSDominik Dingel static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
16963c038e6bSDominik Dingel {
16973c038e6bSDominik Dingel 	hva_t hva;
16983c038e6bSDominik Dingel 	struct kvm_arch_async_pf arch;
16993c038e6bSDominik Dingel 	int rc;
17003c038e6bSDominik Dingel 
17013c038e6bSDominik Dingel 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
17023c038e6bSDominik Dingel 		return 0;
17033c038e6bSDominik Dingel 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
17043c038e6bSDominik Dingel 	    vcpu->arch.pfault_compare)
17053c038e6bSDominik Dingel 		return 0;
17063c038e6bSDominik Dingel 	if (psw_extint_disabled(vcpu))
17073c038e6bSDominik Dingel 		return 0;
17089a022067SDavid Hildenbrand 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
17093c038e6bSDominik Dingel 		return 0;
17103c038e6bSDominik Dingel 	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
17113c038e6bSDominik Dingel 		return 0;
17123c038e6bSDominik Dingel 	if (!vcpu->arch.gmap->pfault_enabled)
17133c038e6bSDominik Dingel 		return 0;
17143c038e6bSDominik Dingel 
171581480cc1SHeiko Carstens 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
171681480cc1SHeiko Carstens 	hva += current->thread.gmap_addr & ~PAGE_MASK;
171781480cc1SHeiko Carstens 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
17183c038e6bSDominik Dingel 		return 0;
17193c038e6bSDominik Dingel 
17203c038e6bSDominik Dingel 	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
17213c038e6bSDominik Dingel 	return rc;
17223c038e6bSDominik Dingel }
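/*
 * An async page fault is only set up when the guest opted in: a valid
 * pfault token, a PSW matching the pfault compare/select mask, external
 * interrupts plus the corresponding CR0 subclass enabled, no interrupt
 * already pending, and pfault enabled on the gmap. Otherwise the caller
 * (vcpu_post_run) falls back to a synchronous fault-in.
 */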
17233c038e6bSDominik Dingel 
17243fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu)
1725b0c632dbSHeiko Carstens {
17263fb4c40fSThomas Huth 	int rc, cpuflags;
1727e168bf8dSCarsten Otte 
17283c038e6bSDominik Dingel 	/*
17293c038e6bSDominik Dingel 	 * On s390 notifications for arriving pages will be delivered directly
17303c038e6bSDominik Dingel 	 * to the guest, but the housekeeping for completed pfaults is
17313c038e6bSDominik Dingel 	 * handled outside the worker.
17323c038e6bSDominik Dingel 	 */
17333c038e6bSDominik Dingel 	kvm_check_async_pf_completion(vcpu);
17343c038e6bSDominik Dingel 
17355a32c1afSChristian Borntraeger 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
1736b0c632dbSHeiko Carstens 
1737b0c632dbSHeiko Carstens 	if (need_resched())
1738b0c632dbSHeiko Carstens 		schedule();
1739b0c632dbSHeiko Carstens 
1740d3a73acbSMartin Schwidefsky 	if (test_cpu_flag(CIF_MCCK_PENDING))
174171cde587SChristian Borntraeger 		s390_handle_mcck();
174271cde587SChristian Borntraeger 
174379395031SJens Freimann 	if (!kvm_is_ucontrol(vcpu->kvm)) {
174479395031SJens Freimann 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
174579395031SJens Freimann 		if (rc)
174679395031SJens Freimann 			return rc;
174779395031SJens Freimann 	}
17480ff31867SCarsten Otte 
17492c70fe44SChristian Borntraeger 	rc = kvm_s390_handle_requests(vcpu);
17502c70fe44SChristian Borntraeger 	if (rc)
17512c70fe44SChristian Borntraeger 		return rc;
17522c70fe44SChristian Borntraeger 
175327291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu)) {
175427291e21SDavid Hildenbrand 		kvm_s390_backup_guest_per_regs(vcpu);
175527291e21SDavid Hildenbrand 		kvm_s390_patch_guest_per_regs(vcpu);
175627291e21SDavid Hildenbrand 	}
175727291e21SDavid Hildenbrand 
1758b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icptcode = 0;
17593fb4c40fSThomas Huth 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
17603fb4c40fSThomas Huth 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
17613fb4c40fSThomas Huth 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
17622b29a9fdSDominik Dingel 
17633fb4c40fSThomas Huth 	return 0;
17643fb4c40fSThomas Huth }
17653fb4c40fSThomas Huth 
1766492d8642SThomas Huth static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
1767492d8642SThomas Huth {
1768492d8642SThomas Huth 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
1769492d8642SThomas Huth 	u8 opcode;
1770492d8642SThomas Huth 	int rc;
1771492d8642SThomas Huth 
1772492d8642SThomas Huth 	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
1773492d8642SThomas Huth 	trace_kvm_s390_sie_fault(vcpu);
1774492d8642SThomas Huth 
1775492d8642SThomas Huth 	/*
1776492d8642SThomas Huth 	 * We want to inject an addressing exception, which is defined as a
1777492d8642SThomas Huth 	 * suppressing or terminating exception. However, since we came here
1778492d8642SThomas Huth 	 * by a DAT access exception, the PSW still points to the faulting
1779492d8642SThomas Huth 	 * instruction since DAT exceptions are nullifying. So we've got
1780492d8642SThomas Huth 	 * to look up the current opcode to get the length of the instruction
1781492d8642SThomas Huth 	 * to be able to forward the PSW.
1782492d8642SThomas Huth 	 */
1783492d8642SThomas Huth 	rc = read_guest(vcpu, psw->addr, &opcode, 1);
1784492d8642SThomas Huth 	if (rc)
1785492d8642SThomas Huth 		return kvm_s390_inject_prog_cond(vcpu, rc);
1786492d8642SThomas Huth 	psw->addr = __rewind_psw(*psw, -insn_length(opcode));
1787492d8642SThomas Huth 
1788492d8642SThomas Huth 	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
1789492d8642SThomas Huth }
1790492d8642SThomas Huth 
17913fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
17923fb4c40fSThomas Huth {
179324eb3a82SDominik Dingel 	int rc = -1;
17942b29a9fdSDominik Dingel 
17952b29a9fdSDominik Dingel 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
17962b29a9fdSDominik Dingel 		   vcpu->arch.sie_block->icptcode);
17972b29a9fdSDominik Dingel 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
17982b29a9fdSDominik Dingel 
179927291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu))
180027291e21SDavid Hildenbrand 		kvm_s390_restore_guest_per_regs(vcpu);
180127291e21SDavid Hildenbrand 
18023fb4c40fSThomas Huth 	if (exit_reason >= 0) {
18037c470539SMartin Schwidefsky 		rc = 0;
1804210b1607SThomas Huth 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
1805210b1607SThomas Huth 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
1806210b1607SThomas Huth 		vcpu->run->s390_ucontrol.trans_exc_code =
1807210b1607SThomas Huth 						current->thread.gmap_addr;
1808210b1607SThomas Huth 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
1809210b1607SThomas Huth 		rc = -EREMOTE;
181024eb3a82SDominik Dingel 
181124eb3a82SDominik Dingel 	} else if (current->thread.gmap_pfault) {
18123c038e6bSDominik Dingel 		trace_kvm_s390_major_guest_pfault(vcpu);
181324eb3a82SDominik Dingel 		current->thread.gmap_pfault = 0;
1814fa576c58SThomas Huth 		if (kvm_arch_setup_async_pf(vcpu)) {
181524eb3a82SDominik Dingel 			rc = 0;
1816fa576c58SThomas Huth 		} else {
1817fa576c58SThomas Huth 			gpa_t gpa = current->thread.gmap_addr;
1818fa576c58SThomas Huth 			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
1819fa576c58SThomas Huth 		}
182024eb3a82SDominik Dingel 	}
182124eb3a82SDominik Dingel 
1822492d8642SThomas Huth 	if (rc == -1)
1823492d8642SThomas Huth 		rc = vcpu_post_run_fault_in_sie(vcpu);
1824b0c632dbSHeiko Carstens 
18255a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
18263fb4c40fSThomas Huth 
1827a76ccff6SThomas Huth 	if (rc == 0) {
1828a76ccff6SThomas Huth 		if (kvm_is_ucontrol(vcpu->kvm))
18292955c83fSChristian Borntraeger 			/* Don't exit for host interrupts. */
18302955c83fSChristian Borntraeger 			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
1831a76ccff6SThomas Huth 		else
1832a76ccff6SThomas Huth 			rc = kvm_handle_sie_intercept(vcpu);
1833a76ccff6SThomas Huth 	}
1834a76ccff6SThomas Huth 
18353fb4c40fSThomas Huth 	return rc;
18363fb4c40fSThomas Huth }
18373fb4c40fSThomas Huth 
18383fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu)
18393fb4c40fSThomas Huth {
18403fb4c40fSThomas Huth 	int rc, exit_reason;
18413fb4c40fSThomas Huth 
1842800c1065SThomas Huth 	/*
1843800c1065SThomas Huth 	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
1844800c1065SThomas Huth 	 * ning the guest), so that memslots (and other stuff) are protected
1845800c1065SThomas Huth 	 */
1846800c1065SThomas Huth 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1847800c1065SThomas Huth 
1848a76ccff6SThomas Huth 	do {
18493fb4c40fSThomas Huth 		rc = vcpu_pre_run(vcpu);
18503fb4c40fSThomas Huth 		if (rc)
1851a76ccff6SThomas Huth 			break;
18523fb4c40fSThomas Huth 
1853800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
18543fb4c40fSThomas Huth 		/*
1855a76ccff6SThomas Huth 		 * As PF_VCPU will be used in the fault handler, there
1856a76ccff6SThomas Huth 		 * should be no uaccess between guest_enter and guest_exit.
18573fb4c40fSThomas Huth 		 */
18583fb4c40fSThomas Huth 		preempt_disable();
18593fb4c40fSThomas Huth 		kvm_guest_enter();
18603fb4c40fSThomas Huth 		preempt_enable();
1861a76ccff6SThomas Huth 		exit_reason = sie64a(vcpu->arch.sie_block,
1862a76ccff6SThomas Huth 				     vcpu->run->s.regs.gprs);
18633fb4c40fSThomas Huth 		kvm_guest_exit();
1864800c1065SThomas Huth 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
18653fb4c40fSThomas Huth 
18663fb4c40fSThomas Huth 		rc = vcpu_post_run(vcpu, exit_reason);
186727291e21SDavid Hildenbrand 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
18683fb4c40fSThomas Huth 
1869800c1065SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1870e168bf8dSCarsten Otte 	return rc;
1871b0c632dbSHeiko Carstens }
1872b0c632dbSHeiko Carstens 
1873b028ee3eSDavid Hildenbrand static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1874b028ee3eSDavid Hildenbrand {
1875b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
1876b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
1877b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
1878b028ee3eSDavid Hildenbrand 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
1879b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
1880b028ee3eSDavid Hildenbrand 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
1881d3d692c8SDavid Hildenbrand 		/* some control register changes require a tlb flush */
1882d3d692c8SDavid Hildenbrand 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1883b028ee3eSDavid Hildenbrand 	}
1884b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
1885b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
1886b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
1887b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
1888b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
1889b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
1890b028ee3eSDavid Hildenbrand 	}
1891b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
1892b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
1893b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
1894b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
18959fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
18969fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
1897b028ee3eSDavid Hildenbrand 	}
1898b028ee3eSDavid Hildenbrand 	kvm_run->kvm_dirty_regs = 0;
1899b028ee3eSDavid Hildenbrand }
1900b028ee3eSDavid Hildenbrand 
1901b028ee3eSDavid Hildenbrand static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1902b028ee3eSDavid Hildenbrand {
1903b028ee3eSDavid Hildenbrand 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
1904b028ee3eSDavid Hildenbrand 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
1905b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
1906b028ee3eSDavid Hildenbrand 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
1907b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
1908b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
1909b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
1910b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
1911b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
1912b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
1913b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
1914b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
1915b028ee3eSDavid Hildenbrand }
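/*
 * sync_regs()/store_regs() implement the synced-register protocol
 * advertised via kvm_valid_regs: before entering the guest the fields
 * marked in kvm_dirty_regs are copied from kvm_run into the SIE block
 * (with a TLB flush when control registers changed), and after running
 * the current values are copied back so userspace can read them without
 * extra ioctls.
 */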
1916b028ee3eSDavid Hildenbrand 
1917b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1918b0c632dbSHeiko Carstens {
19198f2abe6aSChristian Borntraeger 	int rc;
1920b0c632dbSHeiko Carstens 	sigset_t sigsaved;
1921b0c632dbSHeiko Carstens 
192227291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu)) {
192327291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
192427291e21SDavid Hildenbrand 		return 0;
192527291e21SDavid Hildenbrand 	}
192627291e21SDavid Hildenbrand 
1927b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
1928b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1929b0c632dbSHeiko Carstens 
19306352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
19316852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
19326352e4d2SDavid Hildenbrand 	} else if (is_vcpu_stopped(vcpu)) {
19336352e4d2SDavid Hildenbrand 		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
19346352e4d2SDavid Hildenbrand 				   vcpu->vcpu_id);
19356352e4d2SDavid Hildenbrand 		return -EINVAL;
19366352e4d2SDavid Hildenbrand 	}
1937b0c632dbSHeiko Carstens 
1938b028ee3eSDavid Hildenbrand 	sync_regs(vcpu, kvm_run);
1939d7b0b5ebSCarsten Otte 
1940dab4079dSHeiko Carstens 	might_fault();
1941e168bf8dSCarsten Otte 	rc = __vcpu_run(vcpu);
19429ace903dSChristian Ehrhardt 
1943b1d16c49SChristian Ehrhardt 	if (signal_pending(current) && !rc) {
1944b1d16c49SChristian Ehrhardt 		kvm_run->exit_reason = KVM_EXIT_INTR;
19458f2abe6aSChristian Borntraeger 		rc = -EINTR;
1946b1d16c49SChristian Ehrhardt 	}
19478f2abe6aSChristian Borntraeger 
194827291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu) && !rc)  {
194927291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
195027291e21SDavid Hildenbrand 		rc = 0;
195127291e21SDavid Hildenbrand 	}
195227291e21SDavid Hildenbrand 
1953b8e660b8SHeiko Carstens 	if (rc == -EOPNOTSUPP) {
19548f2abe6aSChristian Borntraeger 		/* intercept cannot be handled in-kernel, prepare kvm-run */
19558f2abe6aSChristian Borntraeger 		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
19568f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
19578f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
19588f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
19598f2abe6aSChristian Borntraeger 		rc = 0;
19608f2abe6aSChristian Borntraeger 	}
19618f2abe6aSChristian Borntraeger 
19628f2abe6aSChristian Borntraeger 	if (rc == -EREMOTE) {
19638f2abe6aSChristian Borntraeger 		/* intercept was handled, but userspace support is needed;
19648f2abe6aSChristian Borntraeger 		 * kvm_run has already been prepared by the handler */
19658f2abe6aSChristian Borntraeger 		rc = 0;
19668f2abe6aSChristian Borntraeger 	}
19678f2abe6aSChristian Borntraeger 
1968b028ee3eSDavid Hildenbrand 	store_regs(vcpu, kvm_run);
1969d7b0b5ebSCarsten Otte 
1970b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
1971b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1972b0c632dbSHeiko Carstens 
1973b0c632dbSHeiko Carstens 	vcpu->stat.exit_userspace++;
19747e8e6ab4SHeiko Carstens 	return rc;
1975b0c632dbSHeiko Carstens }
1976b0c632dbSHeiko Carstens 
1977b0c632dbSHeiko Carstens /*
1978b0c632dbSHeiko Carstens  * store status at address
1979b0c632dbSHeiko Carstens  * we have two special cases:
1980b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
1981b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
1982b0c632dbSHeiko Carstens  */
1983d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
1984b0c632dbSHeiko Carstens {
1985092670cdSCarsten Otte 	unsigned char archmode = 1;
1986fda902cbSMichael Mueller 	unsigned int px;
1987178bd789SThomas Huth 	u64 clkcomp;
1988d0bce605SHeiko Carstens 	int rc;
1989b0c632dbSHeiko Carstens 
1990d0bce605SHeiko Carstens 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
1991d0bce605SHeiko Carstens 		if (write_guest_abs(vcpu, 163, &archmode, 1))
1992b0c632dbSHeiko Carstens 			return -EFAULT;
1993d0bce605SHeiko Carstens 		gpa = SAVE_AREA_BASE;
1994d0bce605SHeiko Carstens 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
1995d0bce605SHeiko Carstens 		if (write_guest_real(vcpu, 163, &archmode, 1))
1996b0c632dbSHeiko Carstens 			return -EFAULT;
1997d0bce605SHeiko Carstens 		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
1998d0bce605SHeiko Carstens 	}
1999d0bce605SHeiko Carstens 	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
2000d0bce605SHeiko Carstens 			     vcpu->arch.guest_fpregs.fprs, 128);
2001d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
2002d0bce605SHeiko Carstens 			      vcpu->run->s.regs.gprs, 128);
2003d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
2004d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gpsw, 16);
2005fda902cbSMichael Mueller 	px = kvm_s390_get_prefix(vcpu);
2006d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
2007fda902cbSMichael Mueller 			      &px, 4);
2008d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu,
2009d0bce605SHeiko Carstens 			      gpa + offsetof(struct save_area, fp_ctrl_reg),
2010d0bce605SHeiko Carstens 			      &vcpu->arch.guest_fpregs.fpc, 4);
2011d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
2012d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->todpr, 4);
2013d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
2014d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->cputm, 8);
2015178bd789SThomas Huth 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
2016d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
2017d0bce605SHeiko Carstens 			      &clkcomp, 8);
2018d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
2019d0bce605SHeiko Carstens 			      &vcpu->run->s.regs.acrs, 64);
2020d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
2021d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gcr, 128);
2022d0bce605SHeiko Carstens 	return rc ? -EFAULT : 0;
2023b0c632dbSHeiko Carstens }
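/*
 * The sequence above fills the architected save area at "gpa": floating
 * point registers, general purpose registers, PSW, prefix, FP control,
 * TOD programmable register, CPU timer, clock comparator (shifted right
 * by 8 bits), access registers and control registers, following the
 * field offsets of struct save_area.
 */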
2024b0c632dbSHeiko Carstens 
2025e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2026e879892cSThomas Huth {
2027e879892cSThomas Huth 	/*
2028e879892cSThomas Huth 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2029e879892cSThomas Huth 	 * copying in vcpu load/put. Let's update our copies before we save
2030e879892cSThomas Huth 	 * them into the save area
2031e879892cSThomas Huth 	 */
2032e879892cSThomas Huth 	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
2033e879892cSThomas Huth 	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
2034e879892cSThomas Huth 	save_access_regs(vcpu->run->s.regs.acrs);
2035e879892cSThomas Huth 
2036e879892cSThomas Huth 	return kvm_s390_store_status_unloaded(vcpu, addr);
2037e879892cSThomas Huth }
2038e879892cSThomas Huth 
2039bc17de7cSEric Farman /*
2040bc17de7cSEric Farman  * store additional status at address
2041bc17de7cSEric Farman  */
2042bc17de7cSEric Farman int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2043bc17de7cSEric Farman 					unsigned long gpa)
2044bc17de7cSEric Farman {
2045bc17de7cSEric Farman 	/* Only bits 0-53 are used for address formation */
2046bc17de7cSEric Farman 	if (!(gpa & ~0x3ff))
2047bc17de7cSEric Farman 		return 0;
2048bc17de7cSEric Farman 
2049bc17de7cSEric Farman 	return write_guest_abs(vcpu, gpa & ~0x3ff,
2050bc17de7cSEric Farman 			       (void *)&vcpu->run->s.regs.vrs, 512);
2051bc17de7cSEric Farman }
2052bc17de7cSEric Farman 
2053bc17de7cSEric Farman int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2054bc17de7cSEric Farman {
2055bc17de7cSEric Farman 	if (!test_kvm_facility(vcpu->kvm, 129))
2056bc17de7cSEric Farman 		return 0;
2057bc17de7cSEric Farman 
2058bc17de7cSEric Farman 	/*
2059bc17de7cSEric Farman 	 * The guest VXRS are in the host VXRs due to the lazy
2060bc17de7cSEric Farman 	 * copying in vcpu load/put. Let's update our copies before we save
2061bc17de7cSEric Farman 	 * it into the save area.
2062bc17de7cSEric Farman 	 */
2063bc17de7cSEric Farman 	save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
2064bc17de7cSEric Farman 
2065bc17de7cSEric Farman 	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2066bc17de7cSEric Farman }
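/*
 * The additional status area holds the 512 bytes of guest vector
 * registers and is only written when the vector facility (129) is
 * available; only bits 0-53 of the address are used, i.e. the target
 * block is 1K aligned, matching the check in the _unloaded variant.
 */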
2067bc17de7cSEric Farman 
20688ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
20698ad35755SDavid Hildenbrand {
20708ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
20718ad35755SDavid Hildenbrand 	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
20728ad35755SDavid Hildenbrand 	exit_sie_sync(vcpu);
20738ad35755SDavid Hildenbrand }
20748ad35755SDavid Hildenbrand 
20758ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
20768ad35755SDavid Hildenbrand {
20778ad35755SDavid Hildenbrand 	unsigned int i;
20788ad35755SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
20798ad35755SDavid Hildenbrand 
20808ad35755SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
20818ad35755SDavid Hildenbrand 		__disable_ibs_on_vcpu(vcpu);
20828ad35755SDavid Hildenbrand 	}
20838ad35755SDavid Hildenbrand }
20848ad35755SDavid Hildenbrand 
20858ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
20868ad35755SDavid Hildenbrand {
20878ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
20888ad35755SDavid Hildenbrand 	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
20898ad35755SDavid Hildenbrand 	exit_sie_sync(vcpu);
20908ad35755SDavid Hildenbrand }
20918ad35755SDavid Hildenbrand 
20926852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
20936852d7b6SDavid Hildenbrand {
20948ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
20958ad35755SDavid Hildenbrand 
20968ad35755SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
20978ad35755SDavid Hildenbrand 		return;
20988ad35755SDavid Hildenbrand 
20996852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
21008ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
2101433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
21028ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
21038ad35755SDavid Hildenbrand 
21048ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
21058ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
21068ad35755SDavid Hildenbrand 			started_vcpus++;
21078ad35755SDavid Hildenbrand 	}
21088ad35755SDavid Hildenbrand 
21098ad35755SDavid Hildenbrand 	if (started_vcpus == 0) {
21108ad35755SDavid Hildenbrand 		/* we're the only active VCPU -> speed it up */
21118ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(vcpu);
21128ad35755SDavid Hildenbrand 	} else if (started_vcpus == 1) {
21138ad35755SDavid Hildenbrand 		/*
21148ad35755SDavid Hildenbrand 		 * As we are starting a second VCPU, we have to disable
21158ad35755SDavid Hildenbrand 		 * the IBS facility on all VCPUs to remove potentially
21168ad35755SDavid Hildenbrand 	 * outstanding ENABLE requests.
21178ad35755SDavid Hildenbrand 		 */
21188ad35755SDavid Hildenbrand 		__disable_ibs_on_all_vcpus(vcpu->kvm);
21198ad35755SDavid Hildenbrand 	}
21208ad35755SDavid Hildenbrand 
21216852d7b6SDavid Hildenbrand 	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
21228ad35755SDavid Hildenbrand 	/*
21238ad35755SDavid Hildenbrand 	 * Another VCPU might have used IBS while we were offline.
21248ad35755SDavid Hildenbrand 	 * Let's play it safe and flush the VCPU at startup.
21258ad35755SDavid Hildenbrand 	 */
2126d3d692c8SDavid Hildenbrand 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2127433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
21288ad35755SDavid Hildenbrand 	return;
21296852d7b6SDavid Hildenbrand }
21306852d7b6SDavid Hildenbrand 
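/*
 * Move a started VCPU into the STOPPED state: drop pending SIGP STOP
 * requests, mark the CPU stopped and disable IBS on it. If exactly one
 * started VCPU remains, IBS is re-enabled on that VCPU to speed it up.
 */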
21316852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
21326852d7b6SDavid Hildenbrand {
21338ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
21348ad35755SDavid Hildenbrand 	struct kvm_vcpu *started_vcpu = NULL;
21358ad35755SDavid Hildenbrand 
21368ad35755SDavid Hildenbrand 	if (is_vcpu_stopped(vcpu))
21378ad35755SDavid Hildenbrand 		return;
21388ad35755SDavid Hildenbrand 
21396852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
21408ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
2141433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
21428ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
21438ad35755SDavid Hildenbrand 
214432f5ff63SDavid Hildenbrand 	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
21456cddd432SDavid Hildenbrand 	kvm_s390_clear_stop_irq(vcpu);
214632f5ff63SDavid Hildenbrand 
21476cddd432SDavid Hildenbrand 	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
21488ad35755SDavid Hildenbrand 	__disable_ibs_on_vcpu(vcpu);
21498ad35755SDavid Hildenbrand 
21508ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
21518ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
21528ad35755SDavid Hildenbrand 			started_vcpus++;
21538ad35755SDavid Hildenbrand 			started_vcpu = vcpu->kvm->vcpus[i];
21548ad35755SDavid Hildenbrand 		}
21558ad35755SDavid Hildenbrand 	}
21568ad35755SDavid Hildenbrand 
21578ad35755SDavid Hildenbrand 	if (started_vcpus == 1) {
21588ad35755SDavid Hildenbrand 		/*
21598ad35755SDavid Hildenbrand 		 * As we only have one VCPU left, we want to enable the
21608ad35755SDavid Hildenbrand 		 * IBS facility for that VCPU to speed it up.
21618ad35755SDavid Hildenbrand 		 */
21628ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(started_vcpu);
21638ad35755SDavid Hildenbrand 	}
21648ad35755SDavid Hildenbrand 
2165433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
21668ad35755SDavid Hildenbrand 	return;
21676852d7b6SDavid Hildenbrand }
21686852d7b6SDavid Hildenbrand 
2169d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2170d6712df9SCornelia Huck 				     struct kvm_enable_cap *cap)
2171d6712df9SCornelia Huck {
2172d6712df9SCornelia Huck 	int r;
2173d6712df9SCornelia Huck 
2174d6712df9SCornelia Huck 	if (cap->flags)
2175d6712df9SCornelia Huck 		return -EINVAL;
2176d6712df9SCornelia Huck 
2177d6712df9SCornelia Huck 	switch (cap->cap) {
2178fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
2179fa6b7fe9SCornelia Huck 		if (!vcpu->kvm->arch.css_support) {
2180fa6b7fe9SCornelia Huck 			vcpu->kvm->arch.css_support = 1;
2181fa6b7fe9SCornelia Huck 			trace_kvm_s390_enable_css(vcpu->kvm);
2182fa6b7fe9SCornelia Huck 		}
2183fa6b7fe9SCornelia Huck 		r = 0;
2184fa6b7fe9SCornelia Huck 		break;
2185d6712df9SCornelia Huck 	default:
2186d6712df9SCornelia Huck 		r = -EINVAL;
2187d6712df9SCornelia Huck 		break;
2188d6712df9SCornelia Huck 	}
2189d6712df9SCornelia Huck 	return r;
2190d6712df9SCornelia Huck }
2191d6712df9SCornelia Huck 
2192b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp,
2193b0c632dbSHeiko Carstens 			 unsigned int ioctl, unsigned long arg)
2194b0c632dbSHeiko Carstens {
2195b0c632dbSHeiko Carstens 	struct kvm_vcpu *vcpu = filp->private_data;
2196b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
2197800c1065SThomas Huth 	int idx;
2198bc923cc9SAvi Kivity 	long r;
2199b0c632dbSHeiko Carstens 
220093736624SAvi Kivity 	switch (ioctl) {
220193736624SAvi Kivity 	case KVM_S390_INTERRUPT: {
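		/*
		 * Translate the kvm_s390_interrupt into a kvm_s390_irq
		 * before injecting it.
		 */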
2202ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
2203383d0b05SJens Freimann 		struct kvm_s390_irq s390irq;
2204ba5c1e9bSCarsten Otte 
220593736624SAvi Kivity 		r = -EFAULT;
2206ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
220793736624SAvi Kivity 			break;
2208383d0b05SJens Freimann 		if (s390int_to_s390irq(&s390int, &s390irq))
2209383d0b05SJens Freimann 			return -EINVAL;
2210383d0b05SJens Freimann 		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
221193736624SAvi Kivity 		break;
2212ba5c1e9bSCarsten Otte 	}
2213b0c632dbSHeiko Carstens 	case KVM_S390_STORE_STATUS:
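		/*
		 * Storing the status accesses guest memory, so hold the SRCU
		 * read lock that protects the memslots while doing so.
		 */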
2214800c1065SThomas Huth 		idx = srcu_read_lock(&vcpu->kvm->srcu);
2215bc923cc9SAvi Kivity 		r = kvm_s390_vcpu_store_status(vcpu, arg);
2216800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
2217bc923cc9SAvi Kivity 		break;
2218b0c632dbSHeiko Carstens 	case KVM_S390_SET_INITIAL_PSW: {
2219b0c632dbSHeiko Carstens 		psw_t psw;
2220b0c632dbSHeiko Carstens 
2221bc923cc9SAvi Kivity 		r = -EFAULT;
2222b0c632dbSHeiko Carstens 		if (copy_from_user(&psw, argp, sizeof(psw)))
2223bc923cc9SAvi Kivity 			break;
2224bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2225bc923cc9SAvi Kivity 		break;
2226b0c632dbSHeiko Carstens 	}
2227b0c632dbSHeiko Carstens 	case KVM_S390_INITIAL_RESET:
2228bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2229bc923cc9SAvi Kivity 		break;
223014eebd91SCarsten Otte 	case KVM_SET_ONE_REG:
223114eebd91SCarsten Otte 	case KVM_GET_ONE_REG: {
223214eebd91SCarsten Otte 		struct kvm_one_reg reg;
223314eebd91SCarsten Otte 		r = -EFAULT;
223414eebd91SCarsten Otte 		if (copy_from_user(&reg, argp, sizeof(reg)))
223514eebd91SCarsten Otte 			break;
223614eebd91SCarsten Otte 		if (ioctl == KVM_SET_ONE_REG)
223714eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
223814eebd91SCarsten Otte 		else
223914eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
224014eebd91SCarsten Otte 		break;
224114eebd91SCarsten Otte 	}
224227e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
224327e0393fSCarsten Otte 	case KVM_S390_UCAS_MAP: {
224427e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
224527e0393fSCarsten Otte 
224627e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
224727e0393fSCarsten Otte 			r = -EFAULT;
224827e0393fSCarsten Otte 			break;
224927e0393fSCarsten Otte 		}
225027e0393fSCarsten Otte 
225127e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
225227e0393fSCarsten Otte 			r = -EINVAL;
225327e0393fSCarsten Otte 			break;
225427e0393fSCarsten Otte 		}
225527e0393fSCarsten Otte 
225627e0393fSCarsten Otte 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
225727e0393fSCarsten Otte 				     ucasmap.vcpu_addr, ucasmap.length);
225827e0393fSCarsten Otte 		break;
225927e0393fSCarsten Otte 	}
226027e0393fSCarsten Otte 	case KVM_S390_UCAS_UNMAP: {
226127e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
226227e0393fSCarsten Otte 
226327e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
226427e0393fSCarsten Otte 			r = -EFAULT;
226527e0393fSCarsten Otte 			break;
226627e0393fSCarsten Otte 		}
226727e0393fSCarsten Otte 
226827e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
226927e0393fSCarsten Otte 			r = -EINVAL;
227027e0393fSCarsten Otte 			break;
227127e0393fSCarsten Otte 		}
227227e0393fSCarsten Otte 
227327e0393fSCarsten Otte 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
227427e0393fSCarsten Otte 			ucasmap.length);
227527e0393fSCarsten Otte 		break;
227627e0393fSCarsten Otte 	}
227727e0393fSCarsten Otte #endif
2278ccc7910fSCarsten Otte 	case KVM_S390_VCPU_FAULT: {
2279527e30b4SMartin Schwidefsky 		r = gmap_fault(vcpu->arch.gmap, arg, 0);
2280ccc7910fSCarsten Otte 		break;
2281ccc7910fSCarsten Otte 	}
2282d6712df9SCornelia Huck 	case KVM_ENABLE_CAP:
2283d6712df9SCornelia Huck 	{
2284d6712df9SCornelia Huck 		struct kvm_enable_cap cap;
2285d6712df9SCornelia Huck 		r = -EFAULT;
2286d6712df9SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
2287d6712df9SCornelia Huck 			break;
2288d6712df9SCornelia Huck 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2289d6712df9SCornelia Huck 		break;
2290d6712df9SCornelia Huck 	}
2291b0c632dbSHeiko Carstens 	default:
22923e6afcf1SCarsten Otte 		r = -ENOTTY;
2293b0c632dbSHeiko Carstens 	}
2294bc923cc9SAvi Kivity 	return r;
2295b0c632dbSHeiko Carstens }
2296b0c632dbSHeiko Carstens 
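/*
 * Fault handler for the vcpu mmap: for user controlled virtual machines,
 * page offset KVM_S390_SIE_PAGE_OFFSET maps the SIE control block into
 * userspace; any other access gets SIGBUS.
 */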
22975b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
22985b1c1493SCarsten Otte {
22995b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
23005b1c1493SCarsten Otte 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
23015b1c1493SCarsten Otte 		 && (kvm_is_ucontrol(vcpu->kvm))) {
23025b1c1493SCarsten Otte 		vmf->page = virt_to_page(vcpu->arch.sie_block);
23035b1c1493SCarsten Otte 		get_page(vmf->page);
23045b1c1493SCarsten Otte 		return 0;
23055b1c1493SCarsten Otte 	}
23065b1c1493SCarsten Otte #endif
23075b1c1493SCarsten Otte 	return VM_FAULT_SIGBUS;
23085b1c1493SCarsten Otte }
23095b1c1493SCarsten Otte 
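/*
 * No per-memslot arch data is needed on s390; the gmap mapping is set up
 * in kvm_arch_commit_memory_region() below.
 */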
23105587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
23115587027cSAneesh Kumar K.V 			    unsigned long npages)
2312db3fe4ebSTakuya Yoshikawa {
2313db3fe4ebSTakuya Yoshikawa 	return 0;
2314db3fe4ebSTakuya Yoshikawa }
2315db3fe4ebSTakuya Yoshikawa 
2316b0c632dbSHeiko Carstens /* Section: memory related */
2317f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
2318f7784b8eSMarcelo Tosatti 				   struct kvm_memory_slot *memslot,
23197b6195a9STakuya Yoshikawa 				   struct kvm_userspace_memory_region *mem,
23207b6195a9STakuya Yoshikawa 				   enum kvm_mr_change change)
2321b0c632dbSHeiko Carstens {
2322dd2887e7SNick Wang 	/* A few sanity checks. Memory slots have to start and end at a
2323dd2887e7SNick Wang 	   segment boundary (1 MB). The memory in userland may be fragmented
2324dd2887e7SNick Wang 	   across various vmas, and it is fine to mmap() and munmap() within
2325dd2887e7SNick Wang 	   this slot at any time after this call. */
2326b0c632dbSHeiko Carstens 
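	/* both the userspace address and the size must be 1 MB aligned */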
2327598841caSCarsten Otte 	if (mem->userspace_addr & 0xffffful)
2328b0c632dbSHeiko Carstens 		return -EINVAL;
2329b0c632dbSHeiko Carstens 
2330598841caSCarsten Otte 	if (mem->memory_size & 0xffffful)
2331b0c632dbSHeiko Carstens 		return -EINVAL;
2332b0c632dbSHeiko Carstens 
2333f7784b8eSMarcelo Tosatti 	return 0;
2334f7784b8eSMarcelo Tosatti }
2335f7784b8eSMarcelo Tosatti 
2336f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
2337f7784b8eSMarcelo Tosatti 				struct kvm_userspace_memory_region *mem,
23388482644aSTakuya Yoshikawa 				const struct kvm_memory_slot *old,
23398482644aSTakuya Yoshikawa 				enum kvm_mr_change change)
2340f7784b8eSMarcelo Tosatti {
2341f7850c92SCarsten Otte 	int rc;
2342f7784b8eSMarcelo Tosatti 
23432cef4debSChristian Borntraeger 	/* If the basics of the memslot do not change, we do not want
23442cef4debSChristian Borntraeger 	 * to update the gmap. Every update causes several unnecessary
23452cef4debSChristian Borntraeger 	 * segment translation exceptions. This is usually handled just
23462cef4debSChristian Borntraeger 	 * fine by the normal fault handler + gmap, but it will also
23472cef4debSChristian Borntraeger 	 * cause faults on the prefix page of running guest CPUs.
23482cef4debSChristian Borntraeger 	 */
23492cef4debSChristian Borntraeger 	if (old->userspace_addr == mem->userspace_addr &&
23502cef4debSChristian Borntraeger 	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
23512cef4debSChristian Borntraeger 	    old->npages * PAGE_SIZE == mem->memory_size)
23522cef4debSChristian Borntraeger 		return;
2353598841caSCarsten Otte 
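	/* establish the gmap mapping from guest physical to userspace addresses */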
2354598841caSCarsten Otte 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2355598841caSCarsten Otte 		mem->guest_phys_addr, mem->memory_size);
2356598841caSCarsten Otte 	if (rc)
2357f7850c92SCarsten Otte 		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
2358598841caSCarsten Otte 	return;
2359b0c632dbSHeiko Carstens }
2360b0c632dbSHeiko Carstens 
2361b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
2362b0c632dbSHeiko Carstens {
23639d8d5786SMichael Mueller 	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
2364b0c632dbSHeiko Carstens }
2365b0c632dbSHeiko Carstens 
2366b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void)
2367b0c632dbSHeiko Carstens {
2368b0c632dbSHeiko Carstens 	kvm_exit();
2369b0c632dbSHeiko Carstens }
2370b0c632dbSHeiko Carstens 
2371b0c632dbSHeiko Carstens module_init(kvm_s390_init);
2372b0c632dbSHeiko Carstens module_exit(kvm_s390_exit);
2373566af940SCornelia Huck 
2374566af940SCornelia Huck /*
2375566af940SCornelia Huck  * Enable autoloading of the kvm module.
2376566af940SCornelia Huck  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2377566af940SCornelia Huck  * since x86 takes a different approach.
2378566af940SCornelia Huck  */
2379566af940SCornelia Huck #include <linux/miscdevice.h>
2380566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR);
2381566af940SCornelia Huck MODULE_ALIAS("devname:kvm");
2382