xref: /openbmc/linux/arch/s390/kvm/kvm-s390.c (revision 152b28392a8d9dd08e789b48b602eb75eef436fa)
1b0c632dbSHeiko Carstens /*
2a53c8fabSHeiko Carstens  * hosting zSeries kernel virtual machines
3b0c632dbSHeiko Carstens  *
4628eb9b8SChristian Ehrhardt  * Copyright IBM Corp. 2008, 2009
5b0c632dbSHeiko Carstens  *
6b0c632dbSHeiko Carstens  * This program is free software; you can redistribute it and/or modify
7b0c632dbSHeiko Carstens  * it under the terms of the GNU General Public License (version 2 only)
8b0c632dbSHeiko Carstens  * as published by the Free Software Foundation.
9b0c632dbSHeiko Carstens  *
10b0c632dbSHeiko Carstens  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11b0c632dbSHeiko Carstens  *               Christian Borntraeger <borntraeger@de.ibm.com>
12b0c632dbSHeiko Carstens  *               Heiko Carstens <heiko.carstens@de.ibm.com>
13628eb9b8SChristian Ehrhardt  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
1415f36ebdSJason J. Herne  *               Jason J. Herne <jjherne@us.ibm.com>
15b0c632dbSHeiko Carstens  */
16b0c632dbSHeiko Carstens 
17b0c632dbSHeiko Carstens #include <linux/compiler.h>
18b0c632dbSHeiko Carstens #include <linux/err.h>
19b0c632dbSHeiko Carstens #include <linux/fs.h>
20ca872302SChristian Borntraeger #include <linux/hrtimer.h>
21b0c632dbSHeiko Carstens #include <linux/init.h>
22b0c632dbSHeiko Carstens #include <linux/kvm.h>
23b0c632dbSHeiko Carstens #include <linux/kvm_host.h>
24b0c632dbSHeiko Carstens #include <linux/module.h>
25a374e892STony Krowiak #include <linux/random.h>
26b0c632dbSHeiko Carstens #include <linux/slab.h>
27ba5c1e9bSCarsten Otte #include <linux/timer.h>
2841408c28SThomas Huth #include <linux/vmalloc.h>
29cbb870c8SHeiko Carstens #include <asm/asm-offsets.h>
30b0c632dbSHeiko Carstens #include <asm/lowcore.h>
31fdf03650SFan Zhang #include <asm/etr.h>
32b0c632dbSHeiko Carstens #include <asm/pgtable.h>
33f5daba1dSHeiko Carstens #include <asm/nmi.h>
34a0616cdeSDavid Howells #include <asm/switch_to.h>
356d3da241SJens Freimann #include <asm/isc.h>
361526bf9cSChristian Borntraeger #include <asm/sclp.h>
378f2abe6aSChristian Borntraeger #include "kvm-s390.h"
38b0c632dbSHeiko Carstens #include "gaccess.h"
39b0c632dbSHeiko Carstens 
40ea2cdd27SDavid Hildenbrand #define KMSG_COMPONENT "kvm-s390"
41ea2cdd27SDavid Hildenbrand #undef pr_fmt
42ea2cdd27SDavid Hildenbrand #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
43ea2cdd27SDavid Hildenbrand 
445786fffaSCornelia Huck #define CREATE_TRACE_POINTS
455786fffaSCornelia Huck #include "trace.h"
46ade38c31SCornelia Huck #include "trace-s390.h"
475786fffaSCornelia Huck 
4841408c28SThomas Huth #define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
49816c7667SJens Freimann #define LOCAL_IRQS 32
50816c7667SJens Freimann #define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
51816c7667SJens Freimann 			   (KVM_MAX_VCPUS + LOCAL_IRQS))
5241408c28SThomas Huth 
53b0c632dbSHeiko Carstens #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
54b0c632dbSHeiko Carstens 
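/*
 * Statistics exported via debugfs: each entry below maps a file name to
 * the offset of a counter inside struct kvm_vcpu (via VCPU_STAT), so the
 * generic KVM code can expose the per-VCPU exit, instruction and
 * diagnose counters without knowing their layout.
 */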
55b0c632dbSHeiko Carstens struct kvm_stats_debugfs_item debugfs_entries[] = {
56b0c632dbSHeiko Carstens 	{ "userspace_handled", VCPU_STAT(exit_userspace) },
570eaeafa1SChristian Borntraeger 	{ "exit_null", VCPU_STAT(exit_null) },
588f2abe6aSChristian Borntraeger 	{ "exit_validity", VCPU_STAT(exit_validity) },
598f2abe6aSChristian Borntraeger 	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
608f2abe6aSChristian Borntraeger 	{ "exit_external_request", VCPU_STAT(exit_external_request) },
618f2abe6aSChristian Borntraeger 	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
62ba5c1e9bSCarsten Otte 	{ "exit_instruction", VCPU_STAT(exit_instruction) },
63ba5c1e9bSCarsten Otte 	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
64ba5c1e9bSCarsten Otte 	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
65f7819512SPaolo Bonzini 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
66ce2e4f0bSDavid Hildenbrand 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
67f5e10b09SChristian Borntraeger 	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
68ba5c1e9bSCarsten Otte 	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
69aba07508SDavid Hildenbrand 	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
70aba07508SDavid Hildenbrand 	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
71ba5c1e9bSCarsten Otte 	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
727697e71fSChristian Ehrhardt 	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
73ba5c1e9bSCarsten Otte 	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
74ba5c1e9bSCarsten Otte 	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
75ba5c1e9bSCarsten Otte 	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
76ba5c1e9bSCarsten Otte 	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
77ba5c1e9bSCarsten Otte 	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
78ba5c1e9bSCarsten Otte 	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
79ba5c1e9bSCarsten Otte 	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
8069d0d3a3SChristian Borntraeger 	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
81453423dcSChristian Borntraeger 	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
82453423dcSChristian Borntraeger 	{ "instruction_spx", VCPU_STAT(instruction_spx) },
83453423dcSChristian Borntraeger 	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
84453423dcSChristian Borntraeger 	{ "instruction_stap", VCPU_STAT(instruction_stap) },
85453423dcSChristian Borntraeger 	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
868a242234SHeiko Carstens 	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
87453423dcSChristian Borntraeger 	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
88453423dcSChristian Borntraeger 	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
89b31288faSKonstantin Weitz 	{ "instruction_essa", VCPU_STAT(instruction_essa) },
90453423dcSChristian Borntraeger 	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
91453423dcSChristian Borntraeger 	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
92bb25b9baSChristian Borntraeger 	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
935288fbf0SChristian Borntraeger 	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
94bd59d3a4SCornelia Huck 	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
957697e71fSChristian Ehrhardt 	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
965288fbf0SChristian Borntraeger 	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
9742cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
9842cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
995288fbf0SChristian Borntraeger 	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
10042cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
10142cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
102cd7b4b61SEric Farman 	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
1035288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
1045288fbf0SChristian Borntraeger 	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
1055288fbf0SChristian Borntraeger 	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
10642cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
10742cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
10842cb0c9fSDavid Hildenbrand 	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
109388186bcSChristian Borntraeger 	{ "diagnose_10", VCPU_STAT(diagnose_10) },
110e28acfeaSChristian Borntraeger 	{ "diagnose_44", VCPU_STAT(diagnose_44) },
11141628d33SKonstantin Weitz 	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
112175a5c9eSChristian Borntraeger 	{ "diagnose_258", VCPU_STAT(diagnose_258) },
113175a5c9eSChristian Borntraeger 	{ "diagnose_308", VCPU_STAT(diagnose_308) },
114175a5c9eSChristian Borntraeger 	{ "diagnose_500", VCPU_STAT(diagnose_500) },
115b0c632dbSHeiko Carstens 	{ NULL }
116b0c632dbSHeiko Carstens };
117b0c632dbSHeiko Carstens 
1189d8d5786SMichael Mueller /* upper facilities limit for kvm */
1199d8d5786SMichael Mueller unsigned long kvm_s390_fac_list_mask[] = {
120a3ed8daeSChristian Borntraeger 	0xffe6fffbfcfdfc40UL,
12153df84f8SGuenther Hutzl 	0x005e800000000000UL,
1229d8d5786SMichael Mueller };
123b0c632dbSHeiko Carstens 
1249d8d5786SMichael Mueller unsigned long kvm_s390_fac_list_mask_size(void)
12578c4b59fSMichael Mueller {
1269d8d5786SMichael Mueller 	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
1279d8d5786SMichael Mueller 	return ARRAY_SIZE(kvm_s390_fac_list_mask);
12878c4b59fSMichael Mueller }
12978c4b59fSMichael Mueller 
1309d8d5786SMichael Mueller static struct gmap_notifier gmap_notifier;
13178f26131SChristian Borntraeger debug_info_t *kvm_s390_dbf;
1329d8d5786SMichael Mueller 
133b0c632dbSHeiko Carstens /* Section: not file related */
13413a34e06SRadim Krčmář int kvm_arch_hardware_enable(void)
135b0c632dbSHeiko Carstens {
136b0c632dbSHeiko Carstens 	/* every s390 is virtualization enabled ;-) */
13710474ae8SAlexander Graf 	return 0;
138b0c632dbSHeiko Carstens }
139b0c632dbSHeiko Carstens 
1402c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
1412c70fe44SChristian Borntraeger 
142fdf03650SFan Zhang /*
143fdf03650SFan Zhang  * This callback is executed during stop_machine(). All CPUs are therefore
144fdf03650SFan Zhang  * temporarily stopped. In order not to change guest behavior, we have to
145fdf03650SFan Zhang  * disable preemption whenever we touch the epoch of kvm and the VCPUs,
146fdf03650SFan Zhang  * so a CPU won't be stopped while calculating with the epoch.
147fdf03650SFan Zhang  */
148fdf03650SFan Zhang static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
149fdf03650SFan Zhang 			  void *v)
150fdf03650SFan Zhang {
151fdf03650SFan Zhang 	struct kvm *kvm;
152fdf03650SFan Zhang 	struct kvm_vcpu *vcpu;
153fdf03650SFan Zhang 	int i;
154fdf03650SFan Zhang 	unsigned long long *delta = v;
155fdf03650SFan Zhang 
156fdf03650SFan Zhang 	list_for_each_entry(kvm, &vm_list, vm_list) {
157fdf03650SFan Zhang 		kvm->arch.epoch -= *delta;
158fdf03650SFan Zhang 		kvm_for_each_vcpu(i, vcpu, kvm) {
159fdf03650SFan Zhang 			vcpu->arch.sie_block->epoch -= *delta;
160fdf03650SFan Zhang 		}
161fdf03650SFan Zhang 	}
162fdf03650SFan Zhang 	return NOTIFY_OK;
163fdf03650SFan Zhang }
164fdf03650SFan Zhang 
165fdf03650SFan Zhang static struct notifier_block kvm_clock_notifier = {
166fdf03650SFan Zhang 	.notifier_call = kvm_clock_sync,
167fdf03650SFan Zhang };
168fdf03650SFan Zhang 
169b0c632dbSHeiko Carstens int kvm_arch_hardware_setup(void)
170b0c632dbSHeiko Carstens {
1712c70fe44SChristian Borntraeger 	gmap_notifier.notifier_call = kvm_gmap_notifier;
1722c70fe44SChristian Borntraeger 	gmap_register_ipte_notifier(&gmap_notifier);
173fdf03650SFan Zhang 	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
174fdf03650SFan Zhang 				       &kvm_clock_notifier);
175b0c632dbSHeiko Carstens 	return 0;
176b0c632dbSHeiko Carstens }
177b0c632dbSHeiko Carstens 
178b0c632dbSHeiko Carstens void kvm_arch_hardware_unsetup(void)
179b0c632dbSHeiko Carstens {
1802c70fe44SChristian Borntraeger 	gmap_unregister_ipte_notifier(&gmap_notifier);
181fdf03650SFan Zhang 	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
182fdf03650SFan Zhang 					 &kvm_clock_notifier);
183b0c632dbSHeiko Carstens }
184b0c632dbSHeiko Carstens 
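/*
 * Called once during module initialization: allocate the global
 * "kvm-trace" s390 debug feature area (with a sprintf view) used for
 * KVM-wide trace messages, then register the floating interrupt
 * controller device ops so userspace can create a KVM_DEV_TYPE_FLIC
 * device.
 */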
185b0c632dbSHeiko Carstens int kvm_arch_init(void *opaque)
186b0c632dbSHeiko Carstens {
18778f26131SChristian Borntraeger 	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
18878f26131SChristian Borntraeger 	if (!kvm_s390_dbf)
18978f26131SChristian Borntraeger 		return -ENOMEM;
19078f26131SChristian Borntraeger 
19178f26131SChristian Borntraeger 	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
19278f26131SChristian Borntraeger 		debug_unregister(kvm_s390_dbf);
19378f26131SChristian Borntraeger 		return -ENOMEM;
19478f26131SChristian Borntraeger 	}
19578f26131SChristian Borntraeger 
19684877d93SCornelia Huck 	/* Register floating interrupt controller interface. */
19784877d93SCornelia Huck 	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
198b0c632dbSHeiko Carstens }
199b0c632dbSHeiko Carstens 
20078f26131SChristian Borntraeger void kvm_arch_exit(void)
20178f26131SChristian Borntraeger {
20278f26131SChristian Borntraeger 	debug_unregister(kvm_s390_dbf);
20378f26131SChristian Borntraeger }
20478f26131SChristian Borntraeger 
205b0c632dbSHeiko Carstens /* Section: device related */
206b0c632dbSHeiko Carstens long kvm_arch_dev_ioctl(struct file *filp,
207b0c632dbSHeiko Carstens 			unsigned int ioctl, unsigned long arg)
208b0c632dbSHeiko Carstens {
209b0c632dbSHeiko Carstens 	if (ioctl == KVM_S390_ENABLE_SIE)
210b0c632dbSHeiko Carstens 		return s390_enable_sie();
211b0c632dbSHeiko Carstens 	return -EINVAL;
212b0c632dbSHeiko Carstens }
213b0c632dbSHeiko Carstens 
214784aa3d7SAlexander Graf int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
215b0c632dbSHeiko Carstens {
216d7b0b5ebSCarsten Otte 	int r;
217d7b0b5ebSCarsten Otte 
2182bd0ac4eSCarsten Otte 	switch (ext) {
219d7b0b5ebSCarsten Otte 	case KVM_CAP_S390_PSW:
220b6cf8788SChristian Borntraeger 	case KVM_CAP_S390_GMAP:
22152e16b18SChristian Borntraeger 	case KVM_CAP_SYNC_MMU:
2221efd0f59SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
2231efd0f59SCarsten Otte 	case KVM_CAP_S390_UCONTROL:
2241efd0f59SCarsten Otte #endif
2253c038e6bSDominik Dingel 	case KVM_CAP_ASYNC_PF:
22660b413c9SChristian Borntraeger 	case KVM_CAP_SYNC_REGS:
22714eebd91SCarsten Otte 	case KVM_CAP_ONE_REG:
228d6712df9SCornelia Huck 	case KVM_CAP_ENABLE_CAP:
229fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
23010ccaa1eSCornelia Huck 	case KVM_CAP_IOEVENTFD:
231c05c4186SJens Freimann 	case KVM_CAP_DEVICE_CTRL:
232d938dc55SCornelia Huck 	case KVM_CAP_ENABLE_CAP_VM:
23378599d90SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
234f2061656SDominik Dingel 	case KVM_CAP_VM_ATTRIBUTES:
2356352e4d2SDavid Hildenbrand 	case KVM_CAP_MP_STATE:
23647b43c52SJens Freimann 	case KVM_CAP_S390_INJECT_IRQ:
2372444b352SDavid Hildenbrand 	case KVM_CAP_S390_USER_SIGP:
238e44fc8c9SEkaterina Tumanova 	case KVM_CAP_S390_USER_STSI:
23930ee2a98SJason J. Herne 	case KVM_CAP_S390_SKEYS:
240816c7667SJens Freimann 	case KVM_CAP_S390_IRQ_STATE:
241d7b0b5ebSCarsten Otte 		r = 1;
242d7b0b5ebSCarsten Otte 		break;
24341408c28SThomas Huth 	case KVM_CAP_S390_MEM_OP:
24441408c28SThomas Huth 		r = MEM_OP_MAX_SIZE;
24541408c28SThomas Huth 		break;
246e726b1bdSChristian Borntraeger 	case KVM_CAP_NR_VCPUS:
247e726b1bdSChristian Borntraeger 	case KVM_CAP_MAX_VCPUS:
248e726b1bdSChristian Borntraeger 		r = KVM_MAX_VCPUS;
249e726b1bdSChristian Borntraeger 		break;
250e1e2e605SNick Wang 	case KVM_CAP_NR_MEMSLOTS:
251e1e2e605SNick Wang 		r = KVM_USER_MEM_SLOTS;
252e1e2e605SNick Wang 		break;
2531526bf9cSChristian Borntraeger 	case KVM_CAP_S390_COW:
254abf09bedSMartin Schwidefsky 		r = MACHINE_HAS_ESOP;
2551526bf9cSChristian Borntraeger 		break;
25668c55750SEric Farman 	case KVM_CAP_S390_VECTOR_REGISTERS:
25768c55750SEric Farman 		r = MACHINE_HAS_VX;
25868c55750SEric Farman 		break;
2592bd0ac4eSCarsten Otte 	default:
260d7b0b5ebSCarsten Otte 		r = 0;
261b0c632dbSHeiko Carstens 	}
262d7b0b5ebSCarsten Otte 	return r;
2632bd0ac4eSCarsten Otte }
264b0c632dbSHeiko Carstens 
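/*
 * Walk every guest frame of the memslot and transfer dirty state from
 * the guest mapping (gmap) into the memslot's dirty bitmap. Called with
 * kvm->slots_lock held from kvm_vm_ioctl_get_dirty_log().
 */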
26515f36ebdSJason J. Herne static void kvm_s390_sync_dirty_log(struct kvm *kvm,
26615f36ebdSJason J. Herne 					struct kvm_memory_slot *memslot)
26715f36ebdSJason J. Herne {
26815f36ebdSJason J. Herne 	gfn_t cur_gfn, last_gfn;
26915f36ebdSJason J. Herne 	unsigned long address;
27015f36ebdSJason J. Herne 	struct gmap *gmap = kvm->arch.gmap;
27115f36ebdSJason J. Herne 
27215f36ebdSJason J. Herne 	down_read(&gmap->mm->mmap_sem);
27315f36ebdSJason J. Herne 	/* Loop over all guest pages */
27415f36ebdSJason J. Herne 	last_gfn = memslot->base_gfn + memslot->npages;
27515f36ebdSJason J. Herne 	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
27615f36ebdSJason J. Herne 		address = gfn_to_hva_memslot(memslot, cur_gfn);
27715f36ebdSJason J. Herne 
27815f36ebdSJason J. Herne 		if (gmap_test_and_clear_dirty(address, gmap))
27915f36ebdSJason J. Herne 			mark_page_dirty(kvm, cur_gfn);
28015f36ebdSJason J. Herne 	}
28115f36ebdSJason J. Herne 	up_read(&gmap->mm->mmap_sem);
28215f36ebdSJason J. Herne }
28315f36ebdSJason J. Herne 
284b0c632dbSHeiko Carstens /* Section: vm related */
285b0c632dbSHeiko Carstens /*
286b0c632dbSHeiko Carstens  * Get (and clear) the dirty memory log for a memory slot.
287b0c632dbSHeiko Carstens  */
288b0c632dbSHeiko Carstens int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
289b0c632dbSHeiko Carstens 			       struct kvm_dirty_log *log)
290b0c632dbSHeiko Carstens {
29115f36ebdSJason J. Herne 	int r;
29215f36ebdSJason J. Herne 	unsigned long n;
2939f6b8029SPaolo Bonzini 	struct kvm_memslots *slots;
29415f36ebdSJason J. Herne 	struct kvm_memory_slot *memslot;
29515f36ebdSJason J. Herne 	int is_dirty = 0;
29615f36ebdSJason J. Herne 
29715f36ebdSJason J. Herne 	mutex_lock(&kvm->slots_lock);
29815f36ebdSJason J. Herne 
29915f36ebdSJason J. Herne 	r = -EINVAL;
30015f36ebdSJason J. Herne 	if (log->slot >= KVM_USER_MEM_SLOTS)
30115f36ebdSJason J. Herne 		goto out;
30215f36ebdSJason J. Herne 
3039f6b8029SPaolo Bonzini 	slots = kvm_memslots(kvm);
3049f6b8029SPaolo Bonzini 	memslot = id_to_memslot(slots, log->slot);
30515f36ebdSJason J. Herne 	r = -ENOENT;
30615f36ebdSJason J. Herne 	if (!memslot->dirty_bitmap)
30715f36ebdSJason J. Herne 		goto out;
30815f36ebdSJason J. Herne 
30915f36ebdSJason J. Herne 	kvm_s390_sync_dirty_log(kvm, memslot);
31015f36ebdSJason J. Herne 	r = kvm_get_dirty_log(kvm, log, &is_dirty);
31115f36ebdSJason J. Herne 	if (r)
31215f36ebdSJason J. Herne 		goto out;
31315f36ebdSJason J. Herne 
31415f36ebdSJason J. Herne 	/* Clear the dirty log */
31515f36ebdSJason J. Herne 	if (is_dirty) {
31615f36ebdSJason J. Herne 		n = kvm_dirty_bitmap_bytes(memslot);
31715f36ebdSJason J. Herne 		memset(memslot->dirty_bitmap, 0, n);
31815f36ebdSJason J. Herne 	}
31915f36ebdSJason J. Herne 	r = 0;
32015f36ebdSJason J. Herne out:
32115f36ebdSJason J. Herne 	mutex_unlock(&kvm->slots_lock);
32215f36ebdSJason J. Herne 	return r;
323b0c632dbSHeiko Carstens }
324b0c632dbSHeiko Carstens 
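/*
 * Per-VM capabilities are toggled via the KVM_ENABLE_CAP ioctl on the VM
 * file descriptor. Illustrative userspace call (not taken from this
 * tree, error handling omitted):
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_USER_SIGP };
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 *
 * Enabling KVM_CAP_S390_VECTOR_REGISTERS additionally requires host
 * vector support (MACHINE_HAS_VX) and sets facility bit 129 for the
 * guest.
 */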
325d938dc55SCornelia Huck static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
326d938dc55SCornelia Huck {
327d938dc55SCornelia Huck 	int r;
328d938dc55SCornelia Huck 
329d938dc55SCornelia Huck 	if (cap->flags)
330d938dc55SCornelia Huck 		return -EINVAL;
331d938dc55SCornelia Huck 
332d938dc55SCornelia Huck 	switch (cap->cap) {
33384223598SCornelia Huck 	case KVM_CAP_S390_IRQCHIP:
334c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
33584223598SCornelia Huck 		kvm->arch.use_irqchip = 1;
33684223598SCornelia Huck 		r = 0;
33784223598SCornelia Huck 		break;
3382444b352SDavid Hildenbrand 	case KVM_CAP_S390_USER_SIGP:
339c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
3402444b352SDavid Hildenbrand 		kvm->arch.user_sigp = 1;
3412444b352SDavid Hildenbrand 		r = 0;
3422444b352SDavid Hildenbrand 		break;
34368c55750SEric Farman 	case KVM_CAP_S390_VECTOR_REGISTERS:
34418280d8bSMichael Mueller 		if (MACHINE_HAS_VX) {
34518280d8bSMichael Mueller 			set_kvm_facility(kvm->arch.model.fac->mask, 129);
34618280d8bSMichael Mueller 			set_kvm_facility(kvm->arch.model.fac->list, 129);
34718280d8bSMichael Mueller 			r = 0;
34818280d8bSMichael Mueller 		} else
34918280d8bSMichael Mueller 			r = -EINVAL;
350c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
351c92ea7b9SChristian Borntraeger 			 r ? "(not available)" : "(success)");
35268c55750SEric Farman 		break;
353e44fc8c9SEkaterina Tumanova 	case KVM_CAP_S390_USER_STSI:
354c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
355e44fc8c9SEkaterina Tumanova 		kvm->arch.user_stsi = 1;
356e44fc8c9SEkaterina Tumanova 		r = 0;
357e44fc8c9SEkaterina Tumanova 		break;
358d938dc55SCornelia Huck 	default:
359d938dc55SCornelia Huck 		r = -EINVAL;
360d938dc55SCornelia Huck 		break;
361d938dc55SCornelia Huck 	}
362d938dc55SCornelia Huck 	return r;
363d938dc55SCornelia Huck }
364d938dc55SCornelia Huck 
3658c0a7ce6SDominik Dingel static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
3668c0a7ce6SDominik Dingel {
3678c0a7ce6SDominik Dingel 	int ret;
3688c0a7ce6SDominik Dingel 
3698c0a7ce6SDominik Dingel 	switch (attr->attr) {
3708c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE:
3718c0a7ce6SDominik Dingel 		ret = 0;
372c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
373c92ea7b9SChristian Borntraeger 			 kvm->arch.gmap->asce_end);
3748c0a7ce6SDominik Dingel 		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
3758c0a7ce6SDominik Dingel 			ret = -EFAULT;
3768c0a7ce6SDominik Dingel 		break;
3778c0a7ce6SDominik Dingel 	default:
3788c0a7ce6SDominik Dingel 		ret = -ENXIO;
3798c0a7ce6SDominik Dingel 		break;
3808c0a7ce6SDominik Dingel 	}
3818c0a7ce6SDominik Dingel 	return ret;
3828c0a7ce6SDominik Dingel }
3838c0a7ce6SDominik Dingel 
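/*
 * KVM_S390_VM_MEM_CTRL attribute setter: enables CMMA (only on LPAR
 * with EDAT-1 and only while no VCPU exists), resets all CMMA states,
 * or lowers the guest memory limit by replacing the gmap with a smaller
 * one (again only before the first VCPU is created).
 */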
3848c0a7ce6SDominik Dingel static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
3854f718eabSDominik Dingel {
3864f718eabSDominik Dingel 	int ret;
3874f718eabSDominik Dingel 	unsigned int idx;
3884f718eabSDominik Dingel 	switch (attr->attr) {
3894f718eabSDominik Dingel 	case KVM_S390_VM_MEM_ENABLE_CMMA:
390e6db1d61SDominik Dingel 		/* enable CMMA only for z10 and later (EDAT_1) */
391e6db1d61SDominik Dingel 		ret = -EINVAL;
392e6db1d61SDominik Dingel 		if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
393e6db1d61SDominik Dingel 			break;
394e6db1d61SDominik Dingel 
3954f718eabSDominik Dingel 		ret = -EBUSY;
396c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
3974f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
3984f718eabSDominik Dingel 		if (atomic_read(&kvm->online_vcpus) == 0) {
3994f718eabSDominik Dingel 			kvm->arch.use_cmma = 1;
4004f718eabSDominik Dingel 			ret = 0;
4014f718eabSDominik Dingel 		}
4024f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
4034f718eabSDominik Dingel 		break;
4044f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CLR_CMMA:
405c3489155SDominik Dingel 		ret = -EINVAL;
406c3489155SDominik Dingel 		if (!kvm->arch.use_cmma)
407c3489155SDominik Dingel 			break;
408c3489155SDominik Dingel 
409c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
4104f718eabSDominik Dingel 		mutex_lock(&kvm->lock);
4114f718eabSDominik Dingel 		idx = srcu_read_lock(&kvm->srcu);
412a13cff31SDominik Dingel 		s390_reset_cmma(kvm->arch.gmap->mm);
4134f718eabSDominik Dingel 		srcu_read_unlock(&kvm->srcu, idx);
4144f718eabSDominik Dingel 		mutex_unlock(&kvm->lock);
4154f718eabSDominik Dingel 		ret = 0;
4164f718eabSDominik Dingel 		break;
4178c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_LIMIT_SIZE: {
4188c0a7ce6SDominik Dingel 		unsigned long new_limit;
4198c0a7ce6SDominik Dingel 
4208c0a7ce6SDominik Dingel 		if (kvm_is_ucontrol(kvm))
4218c0a7ce6SDominik Dingel 			return -EINVAL;
4228c0a7ce6SDominik Dingel 
4238c0a7ce6SDominik Dingel 		if (get_user(new_limit, (u64 __user *)attr->addr))
4248c0a7ce6SDominik Dingel 			return -EFAULT;
4258c0a7ce6SDominik Dingel 
4268c0a7ce6SDominik Dingel 		if (new_limit > kvm->arch.gmap->asce_end)
4278c0a7ce6SDominik Dingel 			return -E2BIG;
4288c0a7ce6SDominik Dingel 
4298c0a7ce6SDominik Dingel 		ret = -EBUSY;
4308c0a7ce6SDominik Dingel 		mutex_lock(&kvm->lock);
4318c0a7ce6SDominik Dingel 		if (atomic_read(&kvm->online_vcpus) == 0) {
4328c0a7ce6SDominik Dingel 			/* gmap_alloc will round the limit up */
4338c0a7ce6SDominik Dingel 			struct gmap *new = gmap_alloc(current->mm, new_limit);
4348c0a7ce6SDominik Dingel 
4358c0a7ce6SDominik Dingel 			if (!new) {
4368c0a7ce6SDominik Dingel 				ret = -ENOMEM;
4378c0a7ce6SDominik Dingel 			} else {
4388c0a7ce6SDominik Dingel 				gmap_free(kvm->arch.gmap);
4398c0a7ce6SDominik Dingel 				new->private = kvm;
4408c0a7ce6SDominik Dingel 				kvm->arch.gmap = new;
4418c0a7ce6SDominik Dingel 				ret = 0;
4428c0a7ce6SDominik Dingel 			}
4438c0a7ce6SDominik Dingel 		}
4448c0a7ce6SDominik Dingel 		mutex_unlock(&kvm->lock);
445c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "SET: max guest memory: %lu bytes", new_limit);
4468c0a7ce6SDominik Dingel 		break;
4478c0a7ce6SDominik Dingel 	}
4484f718eabSDominik Dingel 	default:
4494f718eabSDominik Dingel 		ret = -ENXIO;
4504f718eabSDominik Dingel 		break;
4514f718eabSDominik Dingel 	}
4524f718eabSDominik Dingel 	return ret;
4534f718eabSDominik Dingel }
4544f718eabSDominik Dingel 
455a374e892STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
456a374e892STony Krowiak 
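/*
 * Crypto attribute setter: enabling AES/DEA key wrapping generates a
 * fresh random wrapping key mask in the CRYCB, disabling it clears the
 * mask. Every VCPU is then reconfigured and kicked out of SIE so the
 * updated CRYCB settings take effect immediately.
 */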
457a374e892STony Krowiak static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
458a374e892STony Krowiak {
459a374e892STony Krowiak 	struct kvm_vcpu *vcpu;
460a374e892STony Krowiak 	int i;
461a374e892STony Krowiak 
4629d8d5786SMichael Mueller 	if (!test_kvm_facility(kvm, 76))
463a374e892STony Krowiak 		return -EINVAL;
464a374e892STony Krowiak 
465a374e892STony Krowiak 	mutex_lock(&kvm->lock);
466a374e892STony Krowiak 	switch (attr->attr) {
467a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
468a374e892STony Krowiak 		get_random_bytes(
469a374e892STony Krowiak 			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
470a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
471a374e892STony Krowiak 		kvm->arch.crypto.aes_kw = 1;
472c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
473a374e892STony Krowiak 		break;
474a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
475a374e892STony Krowiak 		get_random_bytes(
476a374e892STony Krowiak 			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
477a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
478a374e892STony Krowiak 		kvm->arch.crypto.dea_kw = 1;
479c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
480a374e892STony Krowiak 		break;
481a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
482a374e892STony Krowiak 		kvm->arch.crypto.aes_kw = 0;
483a374e892STony Krowiak 		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
484a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
485c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
486a374e892STony Krowiak 		break;
487a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
488a374e892STony Krowiak 		kvm->arch.crypto.dea_kw = 0;
489a374e892STony Krowiak 		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
490a374e892STony Krowiak 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
491c92ea7b9SChristian Borntraeger 		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
492a374e892STony Krowiak 		break;
493a374e892STony Krowiak 	default:
494a374e892STony Krowiak 		mutex_unlock(&kvm->lock);
495a374e892STony Krowiak 		return -ENXIO;
496a374e892STony Krowiak 	}
497a374e892STony Krowiak 
498a374e892STony Krowiak 	kvm_for_each_vcpu(i, vcpu, kvm) {
499a374e892STony Krowiak 		kvm_s390_vcpu_crypto_setup(vcpu);
500a374e892STony Krowiak 		exit_sie(vcpu);
501a374e892STony Krowiak 	}
502a374e892STony Krowiak 	mutex_unlock(&kvm->lock);
503a374e892STony Krowiak 	return 0;
504a374e892STony Krowiak }
505a374e892STony Krowiak 
50672f25020SJason J. Herne static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
50772f25020SJason J. Herne {
50872f25020SJason J. Herne 	u8 gtod_high;
50972f25020SJason J. Herne 
51072f25020SJason J. Herne 	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
51172f25020SJason J. Herne 					   sizeof(gtod_high)))
51272f25020SJason J. Herne 		return -EFAULT;
51372f25020SJason J. Herne 
51472f25020SJason J. Herne 	if (gtod_high != 0)
51572f25020SJason J. Herne 		return -EINVAL;
516c92ea7b9SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x\n", gtod_high);
51772f25020SJason J. Herne 
51872f25020SJason J. Herne 	return 0;
51972f25020SJason J. Herne }
52072f25020SJason J. Herne 
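/*
 * The guest TOD clock is kept as an epoch delta (guest TOD - host TOD).
 * Setting the low word recomputes that delta and pushes it into every
 * VCPU's SIE block while all VCPUs are blocked; preemption is disabled
 * so the update cannot race with the stop_machine() based clock sync
 * notifier above. Illustrative userspace call (not from this tree):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_TOD,
 *		.attr  = KVM_S390_VM_TOD_LOW,
 *		.addr  = (__u64)&gtod,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */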
52172f25020SJason J. Herne static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
52272f25020SJason J. Herne {
52372f25020SJason J. Herne 	struct kvm_vcpu *cur_vcpu;
52472f25020SJason J. Herne 	unsigned int vcpu_idx;
52572f25020SJason J. Herne 	u64 host_tod, gtod;
52672f25020SJason J. Herne 	int r;
52772f25020SJason J. Herne 
52872f25020SJason J. Herne 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
52972f25020SJason J. Herne 		return -EFAULT;
53072f25020SJason J. Herne 
53172f25020SJason J. Herne 	r = store_tod_clock(&host_tod);
53272f25020SJason J. Herne 	if (r)
53372f25020SJason J. Herne 		return r;
53472f25020SJason J. Herne 
53572f25020SJason J. Herne 	mutex_lock(&kvm->lock);
536fdf03650SFan Zhang 	preempt_disable();
53772f25020SJason J. Herne 	kvm->arch.epoch = gtod - host_tod;
53827406cd5SChristian Borntraeger 	kvm_s390_vcpu_block_all(kvm);
53927406cd5SChristian Borntraeger 	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
54072f25020SJason J. Herne 		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
54127406cd5SChristian Borntraeger 	kvm_s390_vcpu_unblock_all(kvm);
542fdf03650SFan Zhang 	preempt_enable();
54372f25020SJason J. Herne 	mutex_unlock(&kvm->lock);
544c92ea7b9SChristian Borntraeger 	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod);
54572f25020SJason J. Herne 	return 0;
54672f25020SJason J. Herne }
54772f25020SJason J. Herne 
54872f25020SJason J. Herne static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
54972f25020SJason J. Herne {
55072f25020SJason J. Herne 	int ret;
55172f25020SJason J. Herne 
55272f25020SJason J. Herne 	if (attr->flags)
55372f25020SJason J. Herne 		return -EINVAL;
55472f25020SJason J. Herne 
55572f25020SJason J. Herne 	switch (attr->attr) {
55672f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
55772f25020SJason J. Herne 		ret = kvm_s390_set_tod_high(kvm, attr);
55872f25020SJason J. Herne 		break;
55972f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
56072f25020SJason J. Herne 		ret = kvm_s390_set_tod_low(kvm, attr);
56172f25020SJason J. Herne 		break;
56272f25020SJason J. Herne 	default:
56372f25020SJason J. Herne 		ret = -ENXIO;
56472f25020SJason J. Herne 		break;
56572f25020SJason J. Herne 	}
56672f25020SJason J. Herne 	return ret;
56772f25020SJason J. Herne }
56872f25020SJason J. Herne 
56972f25020SJason J. Herne static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
57072f25020SJason J. Herne {
57172f25020SJason J. Herne 	u8 gtod_high = 0;
57272f25020SJason J. Herne 
57372f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod_high,
57472f25020SJason J. Herne 					 sizeof(gtod_high)))
57572f25020SJason J. Herne 		return -EFAULT;
576c92ea7b9SChristian Borntraeger 	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x\n", gtod_high);
57772f25020SJason J. Herne 
57872f25020SJason J. Herne 	return 0;
57972f25020SJason J. Herne }
58072f25020SJason J. Herne 
58172f25020SJason J. Herne static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
58272f25020SJason J. Herne {
58372f25020SJason J. Herne 	u64 host_tod, gtod;
58472f25020SJason J. Herne 	int r;
58572f25020SJason J. Herne 
58672f25020SJason J. Herne 	r = store_tod_clock(&host_tod);
58772f25020SJason J. Herne 	if (r)
58872f25020SJason J. Herne 		return r;
58972f25020SJason J. Herne 
590fdf03650SFan Zhang 	preempt_disable();
59172f25020SJason J. Herne 	gtod = host_tod + kvm->arch.epoch;
592fdf03650SFan Zhang 	preempt_enable();
59372f25020SJason J. Herne 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
59472f25020SJason J. Herne 		return -EFAULT;
595c92ea7b9SChristian Borntraeger 	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx\n", gtod);
59672f25020SJason J. Herne 
59772f25020SJason J. Herne 	return 0;
59872f25020SJason J. Herne }
59972f25020SJason J. Herne 
60072f25020SJason J. Herne static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
60172f25020SJason J. Herne {
60272f25020SJason J. Herne 	int ret;
60372f25020SJason J. Herne 
60472f25020SJason J. Herne 	if (attr->flags)
60572f25020SJason J. Herne 		return -EINVAL;
60672f25020SJason J. Herne 
60772f25020SJason J. Herne 	switch (attr->attr) {
60872f25020SJason J. Herne 	case KVM_S390_VM_TOD_HIGH:
60972f25020SJason J. Herne 		ret = kvm_s390_get_tod_high(kvm, attr);
61072f25020SJason J. Herne 		break;
61172f25020SJason J. Herne 	case KVM_S390_VM_TOD_LOW:
61272f25020SJason J. Herne 		ret = kvm_s390_get_tod_low(kvm, attr);
61372f25020SJason J. Herne 		break;
61472f25020SJason J. Herne 	default:
61572f25020SJason J. Herne 		ret = -ENXIO;
61672f25020SJason J. Herne 		break;
61772f25020SJason J. Herne 	}
61872f25020SJason J. Herne 	return ret;
61972f25020SJason J. Herne }
62072f25020SJason J. Herne 
621658b6edaSMichael Mueller static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
622658b6edaSMichael Mueller {
623658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_processor *proc;
624658b6edaSMichael Mueller 	int ret = 0;
625658b6edaSMichael Mueller 
626658b6edaSMichael Mueller 	mutex_lock(&kvm->lock);
627658b6edaSMichael Mueller 	if (atomic_read(&kvm->online_vcpus)) {
628658b6edaSMichael Mueller 		ret = -EBUSY;
629658b6edaSMichael Mueller 		goto out;
630658b6edaSMichael Mueller 	}
631658b6edaSMichael Mueller 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
632658b6edaSMichael Mueller 	if (!proc) {
633658b6edaSMichael Mueller 		ret = -ENOMEM;
634658b6edaSMichael Mueller 		goto out;
635658b6edaSMichael Mueller 	}
636658b6edaSMichael Mueller 	if (!copy_from_user(proc, (void __user *)attr->addr,
637658b6edaSMichael Mueller 			    sizeof(*proc))) {
638658b6edaSMichael Mueller 		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
639658b6edaSMichael Mueller 		       sizeof(struct cpuid));
640658b6edaSMichael Mueller 		kvm->arch.model.ibc = proc->ibc;
641981467c9SMichael Mueller 		memcpy(kvm->arch.model.fac->list, proc->fac_list,
642658b6edaSMichael Mueller 		       S390_ARCH_FAC_LIST_SIZE_BYTE);
643658b6edaSMichael Mueller 	} else
644658b6edaSMichael Mueller 		ret = -EFAULT;
645658b6edaSMichael Mueller 	kfree(proc);
646658b6edaSMichael Mueller out:
647658b6edaSMichael Mueller 	mutex_unlock(&kvm->lock);
648658b6edaSMichael Mueller 	return ret;
649658b6edaSMichael Mueller }
650658b6edaSMichael Mueller 
651658b6edaSMichael Mueller static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
652658b6edaSMichael Mueller {
653658b6edaSMichael Mueller 	int ret = -ENXIO;
654658b6edaSMichael Mueller 
655658b6edaSMichael Mueller 	switch (attr->attr) {
656658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
657658b6edaSMichael Mueller 		ret = kvm_s390_set_processor(kvm, attr);
658658b6edaSMichael Mueller 		break;
659658b6edaSMichael Mueller 	}
660658b6edaSMichael Mueller 	return ret;
661658b6edaSMichael Mueller }
662658b6edaSMichael Mueller 
663658b6edaSMichael Mueller static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
664658b6edaSMichael Mueller {
665658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_processor *proc;
666658b6edaSMichael Mueller 	int ret = 0;
667658b6edaSMichael Mueller 
668658b6edaSMichael Mueller 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
669658b6edaSMichael Mueller 	if (!proc) {
670658b6edaSMichael Mueller 		ret = -ENOMEM;
671658b6edaSMichael Mueller 		goto out;
672658b6edaSMichael Mueller 	}
673658b6edaSMichael Mueller 	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
674658b6edaSMichael Mueller 	proc->ibc = kvm->arch.model.ibc;
675981467c9SMichael Mueller 	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
676658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
677658b6edaSMichael Mueller 		ret = -EFAULT;
678658b6edaSMichael Mueller 	kfree(proc);
679658b6edaSMichael Mueller out:
680658b6edaSMichael Mueller 	return ret;
681658b6edaSMichael Mueller }
682658b6edaSMichael Mueller 
683658b6edaSMichael Mueller static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
684658b6edaSMichael Mueller {
685658b6edaSMichael Mueller 	struct kvm_s390_vm_cpu_machine *mach;
686658b6edaSMichael Mueller 	int ret = 0;
687658b6edaSMichael Mueller 
688658b6edaSMichael Mueller 	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
689658b6edaSMichael Mueller 	if (!mach) {
690658b6edaSMichael Mueller 		ret = -ENOMEM;
691658b6edaSMichael Mueller 		goto out;
692658b6edaSMichael Mueller 	}
693658b6edaSMichael Mueller 	get_cpu_id((struct cpuid *) &mach->cpuid);
69437c5f6c8SDavid Hildenbrand 	mach->ibc = sclp.ibc;
695981467c9SMichael Mueller 	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
696981467c9SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
697658b6edaSMichael Mueller 	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
69894422ee8SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
699658b6edaSMichael Mueller 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
700658b6edaSMichael Mueller 		ret = -EFAULT;
701658b6edaSMichael Mueller 	kfree(mach);
702658b6edaSMichael Mueller out:
703658b6edaSMichael Mueller 	return ret;
704658b6edaSMichael Mueller }
705658b6edaSMichael Mueller 
706658b6edaSMichael Mueller static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
707658b6edaSMichael Mueller {
708658b6edaSMichael Mueller 	int ret = -ENXIO;
709658b6edaSMichael Mueller 
710658b6edaSMichael Mueller 	switch (attr->attr) {
711658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_PROCESSOR:
712658b6edaSMichael Mueller 		ret = kvm_s390_get_processor(kvm, attr);
713658b6edaSMichael Mueller 		break;
714658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MACHINE:
715658b6edaSMichael Mueller 		ret = kvm_s390_get_machine(kvm, attr);
716658b6edaSMichael Mueller 		break;
717658b6edaSMichael Mueller 	}
718658b6edaSMichael Mueller 	return ret;
719658b6edaSMichael Mueller }
720658b6edaSMichael Mueller 
721f2061656SDominik Dingel static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
722f2061656SDominik Dingel {
723f2061656SDominik Dingel 	int ret;
724f2061656SDominik Dingel 
725f2061656SDominik Dingel 	switch (attr->group) {
7264f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
7278c0a7ce6SDominik Dingel 		ret = kvm_s390_set_mem_control(kvm, attr);
7284f718eabSDominik Dingel 		break;
72972f25020SJason J. Herne 	case KVM_S390_VM_TOD:
73072f25020SJason J. Herne 		ret = kvm_s390_set_tod(kvm, attr);
73172f25020SJason J. Herne 		break;
732658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
733658b6edaSMichael Mueller 		ret = kvm_s390_set_cpu_model(kvm, attr);
734658b6edaSMichael Mueller 		break;
735a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO:
736a374e892STony Krowiak 		ret = kvm_s390_vm_set_crypto(kvm, attr);
737a374e892STony Krowiak 		break;
738f2061656SDominik Dingel 	default:
739f2061656SDominik Dingel 		ret = -ENXIO;
740f2061656SDominik Dingel 		break;
741f2061656SDominik Dingel 	}
742f2061656SDominik Dingel 
743f2061656SDominik Dingel 	return ret;
744f2061656SDominik Dingel }
745f2061656SDominik Dingel 
746f2061656SDominik Dingel static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
747f2061656SDominik Dingel {
7488c0a7ce6SDominik Dingel 	int ret;
7498c0a7ce6SDominik Dingel 
7508c0a7ce6SDominik Dingel 	switch (attr->group) {
7518c0a7ce6SDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
7528c0a7ce6SDominik Dingel 		ret = kvm_s390_get_mem_control(kvm, attr);
7538c0a7ce6SDominik Dingel 		break;
75472f25020SJason J. Herne 	case KVM_S390_VM_TOD:
75572f25020SJason J. Herne 		ret = kvm_s390_get_tod(kvm, attr);
75672f25020SJason J. Herne 		break;
757658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
758658b6edaSMichael Mueller 		ret = kvm_s390_get_cpu_model(kvm, attr);
759658b6edaSMichael Mueller 		break;
7608c0a7ce6SDominik Dingel 	default:
7618c0a7ce6SDominik Dingel 		ret = -ENXIO;
7628c0a7ce6SDominik Dingel 		break;
7638c0a7ce6SDominik Dingel 	}
7648c0a7ce6SDominik Dingel 
7658c0a7ce6SDominik Dingel 	return ret;
766f2061656SDominik Dingel }
767f2061656SDominik Dingel 
768f2061656SDominik Dingel static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
769f2061656SDominik Dingel {
770f2061656SDominik Dingel 	int ret;
771f2061656SDominik Dingel 
772f2061656SDominik Dingel 	switch (attr->group) {
7734f718eabSDominik Dingel 	case KVM_S390_VM_MEM_CTRL:
7744f718eabSDominik Dingel 		switch (attr->attr) {
7754f718eabSDominik Dingel 		case KVM_S390_VM_MEM_ENABLE_CMMA:
7764f718eabSDominik Dingel 		case KVM_S390_VM_MEM_CLR_CMMA:
7778c0a7ce6SDominik Dingel 		case KVM_S390_VM_MEM_LIMIT_SIZE:
7784f718eabSDominik Dingel 			ret = 0;
7794f718eabSDominik Dingel 			break;
7804f718eabSDominik Dingel 		default:
7814f718eabSDominik Dingel 			ret = -ENXIO;
7824f718eabSDominik Dingel 			break;
7834f718eabSDominik Dingel 		}
7844f718eabSDominik Dingel 		break;
78572f25020SJason J. Herne 	case KVM_S390_VM_TOD:
78672f25020SJason J. Herne 		switch (attr->attr) {
78772f25020SJason J. Herne 		case KVM_S390_VM_TOD_LOW:
78872f25020SJason J. Herne 		case KVM_S390_VM_TOD_HIGH:
78972f25020SJason J. Herne 			ret = 0;
79072f25020SJason J. Herne 			break;
79172f25020SJason J. Herne 		default:
79272f25020SJason J. Herne 			ret = -ENXIO;
79372f25020SJason J. Herne 			break;
79472f25020SJason J. Herne 		}
79572f25020SJason J. Herne 		break;
796658b6edaSMichael Mueller 	case KVM_S390_VM_CPU_MODEL:
797658b6edaSMichael Mueller 		switch (attr->attr) {
798658b6edaSMichael Mueller 		case KVM_S390_VM_CPU_PROCESSOR:
799658b6edaSMichael Mueller 		case KVM_S390_VM_CPU_MACHINE:
800658b6edaSMichael Mueller 			ret = 0;
801658b6edaSMichael Mueller 			break;
802658b6edaSMichael Mueller 		default:
803658b6edaSMichael Mueller 			ret = -ENXIO;
804658b6edaSMichael Mueller 			break;
805658b6edaSMichael Mueller 		}
806658b6edaSMichael Mueller 		break;
807a374e892STony Krowiak 	case KVM_S390_VM_CRYPTO:
808a374e892STony Krowiak 		switch (attr->attr) {
809a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
810a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
811a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
812a374e892STony Krowiak 		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
813a374e892STony Krowiak 			ret = 0;
814a374e892STony Krowiak 			break;
815a374e892STony Krowiak 		default:
816a374e892STony Krowiak 			ret = -ENXIO;
817a374e892STony Krowiak 			break;
818a374e892STony Krowiak 		}
819a374e892STony Krowiak 		break;
820f2061656SDominik Dingel 	default:
821f2061656SDominik Dingel 		ret = -ENXIO;
822f2061656SDominik Dingel 		break;
823f2061656SDominik Dingel 	}
824f2061656SDominik Dingel 
825f2061656SDominik Dingel 	return ret;
826f2061656SDominik Dingel }
827f2061656SDominik Dingel 
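/*
 * KVM_S390_GET_SKEYS: returns KVM_S390_GET_SKEYS_NONE when the guest is
 * not using storage keys; otherwise copies one key byte per guest frame,
 * starting at args->start_gfn, into the userspace buffer at
 * args->skeydata_addr. args->count is bounded by KVM_S390_SKEYS_MAX to
 * keep the temporary kernel allocation sane.
 */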
82830ee2a98SJason J. Herne static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
82930ee2a98SJason J. Herne {
83030ee2a98SJason J. Herne 	uint8_t *keys;
83130ee2a98SJason J. Herne 	uint64_t hva;
83230ee2a98SJason J. Herne 	unsigned long curkey;
83330ee2a98SJason J. Herne 	int i, r = 0;
83430ee2a98SJason J. Herne 
83530ee2a98SJason J. Herne 	if (args->flags != 0)
83630ee2a98SJason J. Herne 		return -EINVAL;
83730ee2a98SJason J. Herne 
83830ee2a98SJason J. Herne 	/* Is this guest using storage keys? */
83930ee2a98SJason J. Herne 	if (!mm_use_skey(current->mm))
84030ee2a98SJason J. Herne 		return KVM_S390_GET_SKEYS_NONE;
84130ee2a98SJason J. Herne 
84230ee2a98SJason J. Herne 	/* Enforce sane limit on memory allocation */
84330ee2a98SJason J. Herne 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
84430ee2a98SJason J. Herne 		return -EINVAL;
84530ee2a98SJason J. Herne 
84630ee2a98SJason J. Herne 	keys = kmalloc_array(args->count, sizeof(uint8_t),
84730ee2a98SJason J. Herne 			     GFP_KERNEL | __GFP_NOWARN);
84830ee2a98SJason J. Herne 	if (!keys)
84930ee2a98SJason J. Herne 		keys = vmalloc(sizeof(uint8_t) * args->count);
85030ee2a98SJason J. Herne 	if (!keys)
85130ee2a98SJason J. Herne 		return -ENOMEM;
85230ee2a98SJason J. Herne 
85330ee2a98SJason J. Herne 	for (i = 0; i < args->count; i++) {
85430ee2a98SJason J. Herne 		hva = gfn_to_hva(kvm, args->start_gfn + i);
85530ee2a98SJason J. Herne 		if (kvm_is_error_hva(hva)) {
85630ee2a98SJason J. Herne 			r = -EFAULT;
85730ee2a98SJason J. Herne 			goto out;
85830ee2a98SJason J. Herne 		}
85930ee2a98SJason J. Herne 
86030ee2a98SJason J. Herne 		curkey = get_guest_storage_key(current->mm, hva);
86130ee2a98SJason J. Herne 		if (IS_ERR_VALUE(curkey)) {
86230ee2a98SJason J. Herne 			r = curkey;
86330ee2a98SJason J. Herne 			goto out;
86430ee2a98SJason J. Herne 		}
86530ee2a98SJason J. Herne 		keys[i] = curkey;
86630ee2a98SJason J. Herne 	}
86730ee2a98SJason J. Herne 
86830ee2a98SJason J. Herne 	r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
86930ee2a98SJason J. Herne 			 sizeof(uint8_t) * args->count);
87030ee2a98SJason J. Herne 	if (r)
87130ee2a98SJason J. Herne 		r = -EFAULT;
87230ee2a98SJason J. Herne out:
87330ee2a98SJason J. Herne 	kvfree(keys);
87430ee2a98SJason J. Herne 	return r;
87530ee2a98SJason J. Herne }
87630ee2a98SJason J. Herne 
87730ee2a98SJason J. Herne static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
87830ee2a98SJason J. Herne {
87930ee2a98SJason J. Herne 	uint8_t *keys;
88030ee2a98SJason J. Herne 	uint64_t hva;
88130ee2a98SJason J. Herne 	int i, r = 0;
88230ee2a98SJason J. Herne 
88330ee2a98SJason J. Herne 	if (args->flags != 0)
88430ee2a98SJason J. Herne 		return -EINVAL;
88530ee2a98SJason J. Herne 
88630ee2a98SJason J. Herne 	/* Enforce sane limit on memory allocation */
88730ee2a98SJason J. Herne 	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
88830ee2a98SJason J. Herne 		return -EINVAL;
88930ee2a98SJason J. Herne 
89030ee2a98SJason J. Herne 	keys = kmalloc_array(args->count, sizeof(uint8_t),
89130ee2a98SJason J. Herne 			     GFP_KERNEL | __GFP_NOWARN);
89230ee2a98SJason J. Herne 	if (!keys)
89330ee2a98SJason J. Herne 		keys = vmalloc(sizeof(uint8_t) * args->count);
89430ee2a98SJason J. Herne 	if (!keys)
89530ee2a98SJason J. Herne 		return -ENOMEM;
89630ee2a98SJason J. Herne 
89730ee2a98SJason J. Herne 	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
89830ee2a98SJason J. Herne 			   sizeof(uint8_t) * args->count);
89930ee2a98SJason J. Herne 	if (r) {
90030ee2a98SJason J. Herne 		r = -EFAULT;
90130ee2a98SJason J. Herne 		goto out;
90230ee2a98SJason J. Herne 	}
90330ee2a98SJason J. Herne 
90430ee2a98SJason J. Herne 	/* Enable storage key handling for the guest */
90514d4a425SDominik Dingel 	r = s390_enable_skey();
90614d4a425SDominik Dingel 	if (r)
90714d4a425SDominik Dingel 		goto out;
90830ee2a98SJason J. Herne 
90930ee2a98SJason J. Herne 	for (i = 0; i < args->count; i++) {
91030ee2a98SJason J. Herne 		hva = gfn_to_hva(kvm, args->start_gfn + i);
91130ee2a98SJason J. Herne 		if (kvm_is_error_hva(hva)) {
91230ee2a98SJason J. Herne 			r = -EFAULT;
91330ee2a98SJason J. Herne 			goto out;
91430ee2a98SJason J. Herne 		}
91530ee2a98SJason J. Herne 
91630ee2a98SJason J. Herne 		/* Lowest order bit is reserved */
91730ee2a98SJason J. Herne 		if (keys[i] & 0x01) {
91830ee2a98SJason J. Herne 			r = -EINVAL;
91930ee2a98SJason J. Herne 			goto out;
92030ee2a98SJason J. Herne 		}
92130ee2a98SJason J. Herne 
92230ee2a98SJason J. Herne 		r = set_guest_storage_key(current->mm, hva,
92330ee2a98SJason J. Herne 					  (unsigned long)keys[i], 0);
92430ee2a98SJason J. Herne 		if (r)
92530ee2a98SJason J. Herne 			goto out;
92630ee2a98SJason J. Herne 	}
92730ee2a98SJason J. Herne out:
92830ee2a98SJason J. Herne 	kvfree(keys);
92930ee2a98SJason J. Herne 	return r;
93030ee2a98SJason J. Herne }
93130ee2a98SJason J. Herne 
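/*
 * VM-level ioctl dispatcher for s390: floating interrupt injection,
 * KVM_ENABLE_CAP, dummy routing setup for KVM_CREATE_IRQCHIP, the
 * get/set/has device-attribute interface and the storage key ioctls
 * all funnel through here; anything else is -ENOTTY.
 */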
932b0c632dbSHeiko Carstens long kvm_arch_vm_ioctl(struct file *filp,
933b0c632dbSHeiko Carstens 		       unsigned int ioctl, unsigned long arg)
934b0c632dbSHeiko Carstens {
935b0c632dbSHeiko Carstens 	struct kvm *kvm = filp->private_data;
936b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
937f2061656SDominik Dingel 	struct kvm_device_attr attr;
938b0c632dbSHeiko Carstens 	int r;
939b0c632dbSHeiko Carstens 
940b0c632dbSHeiko Carstens 	switch (ioctl) {
941ba5c1e9bSCarsten Otte 	case KVM_S390_INTERRUPT: {
942ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
943ba5c1e9bSCarsten Otte 
944ba5c1e9bSCarsten Otte 		r = -EFAULT;
945ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
946ba5c1e9bSCarsten Otte 			break;
947ba5c1e9bSCarsten Otte 		r = kvm_s390_inject_vm(kvm, &s390int);
948ba5c1e9bSCarsten Otte 		break;
949ba5c1e9bSCarsten Otte 	}
950d938dc55SCornelia Huck 	case KVM_ENABLE_CAP: {
951d938dc55SCornelia Huck 		struct kvm_enable_cap cap;
952d938dc55SCornelia Huck 		r = -EFAULT;
953d938dc55SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
954d938dc55SCornelia Huck 			break;
955d938dc55SCornelia Huck 		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
956d938dc55SCornelia Huck 		break;
957d938dc55SCornelia Huck 	}
95884223598SCornelia Huck 	case KVM_CREATE_IRQCHIP: {
95984223598SCornelia Huck 		struct kvm_irq_routing_entry routing;
96084223598SCornelia Huck 
96184223598SCornelia Huck 		r = -EINVAL;
96284223598SCornelia Huck 		if (kvm->arch.use_irqchip) {
96384223598SCornelia Huck 			/* Set up dummy routing. */
96484223598SCornelia Huck 			memset(&routing, 0, sizeof(routing));
965*152b2839SNicholas Krause 			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
96684223598SCornelia Huck 		}
96784223598SCornelia Huck 		break;
96884223598SCornelia Huck 	}
969f2061656SDominik Dingel 	case KVM_SET_DEVICE_ATTR: {
970f2061656SDominik Dingel 		r = -EFAULT;
971f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
972f2061656SDominik Dingel 			break;
973f2061656SDominik Dingel 		r = kvm_s390_vm_set_attr(kvm, &attr);
974f2061656SDominik Dingel 		break;
975f2061656SDominik Dingel 	}
976f2061656SDominik Dingel 	case KVM_GET_DEVICE_ATTR: {
977f2061656SDominik Dingel 		r = -EFAULT;
978f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
979f2061656SDominik Dingel 			break;
980f2061656SDominik Dingel 		r = kvm_s390_vm_get_attr(kvm, &attr);
981f2061656SDominik Dingel 		break;
982f2061656SDominik Dingel 	}
983f2061656SDominik Dingel 	case KVM_HAS_DEVICE_ATTR: {
984f2061656SDominik Dingel 		r = -EFAULT;
985f2061656SDominik Dingel 		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
986f2061656SDominik Dingel 			break;
987f2061656SDominik Dingel 		r = kvm_s390_vm_has_attr(kvm, &attr);
988f2061656SDominik Dingel 		break;
989f2061656SDominik Dingel 	}
99030ee2a98SJason J. Herne 	case KVM_S390_GET_SKEYS: {
99130ee2a98SJason J. Herne 		struct kvm_s390_skeys args;
99230ee2a98SJason J. Herne 
99330ee2a98SJason J. Herne 		r = -EFAULT;
99430ee2a98SJason J. Herne 		if (copy_from_user(&args, argp,
99530ee2a98SJason J. Herne 				   sizeof(struct kvm_s390_skeys)))
99630ee2a98SJason J. Herne 			break;
99730ee2a98SJason J. Herne 		r = kvm_s390_get_skeys(kvm, &args);
99830ee2a98SJason J. Herne 		break;
99930ee2a98SJason J. Herne 	}
100030ee2a98SJason J. Herne 	case KVM_S390_SET_SKEYS: {
100130ee2a98SJason J. Herne 		struct kvm_s390_skeys args;
100230ee2a98SJason J. Herne 
100330ee2a98SJason J. Herne 		r = -EFAULT;
100430ee2a98SJason J. Herne 		if (copy_from_user(&args, argp,
100530ee2a98SJason J. Herne 				   sizeof(struct kvm_s390_skeys)))
100630ee2a98SJason J. Herne 			break;
100730ee2a98SJason J. Herne 		r = kvm_s390_set_skeys(kvm, &args);
100830ee2a98SJason J. Herne 		break;
100930ee2a98SJason J. Herne 	}
1010b0c632dbSHeiko Carstens 	default:
1011367e1319SAvi Kivity 		r = -ENOTTY;
1012b0c632dbSHeiko Carstens 	}
1013b0c632dbSHeiko Carstens 
1014b0c632dbSHeiko Carstens 	return r;
1015b0c632dbSHeiko Carstens }
1016b0c632dbSHeiko Carstens 
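/*
 * Query the Adjunct Processor configuration: PQAP with the QCI function
 * code in register 0 stores a 128-byte info block at the address in
 * register 2. Returns the condition code; non-zero means the query
 * failed or QCI is unavailable, in which case the caller falls back to
 * CRYCB format 1.
 */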
101745c9b47cSTony Krowiak static int kvm_s390_query_ap_config(u8 *config)
101845c9b47cSTony Krowiak {
101945c9b47cSTony Krowiak 	u32 fcn_code = 0x04000000UL;
102086044c8cSChristian Borntraeger 	u32 cc = 0;
102145c9b47cSTony Krowiak 
102286044c8cSChristian Borntraeger 	memset(config, 0, 128);
102345c9b47cSTony Krowiak 	asm volatile(
102445c9b47cSTony Krowiak 		"lgr 0,%1\n"
102545c9b47cSTony Krowiak 		"lgr 2,%2\n"
102645c9b47cSTony Krowiak 		".long 0xb2af0000\n"		/* PQAP(QCI) */
102786044c8cSChristian Borntraeger 		"0: ipm %0\n"
102845c9b47cSTony Krowiak 		"srl %0,28\n"
102986044c8cSChristian Borntraeger 		"1:\n"
103086044c8cSChristian Borntraeger 		EX_TABLE(0b, 1b)
103186044c8cSChristian Borntraeger 		: "+r" (cc)
103245c9b47cSTony Krowiak 		: "r" (fcn_code), "r" (config)
103345c9b47cSTony Krowiak 		: "cc", "0", "2", "memory"
103445c9b47cSTony Krowiak 	);
103545c9b47cSTony Krowiak 
103645c9b47cSTony Krowiak 	return cc;
103745c9b47cSTony Krowiak }
103845c9b47cSTony Krowiak 
103945c9b47cSTony Krowiak static int kvm_s390_apxa_installed(void)
104045c9b47cSTony Krowiak {
104145c9b47cSTony Krowiak 	u8 config[128];
104245c9b47cSTony Krowiak 	int cc;
104345c9b47cSTony Krowiak 
104445c9b47cSTony Krowiak 	if (test_facility(2) && test_facility(12)) {
104545c9b47cSTony Krowiak 		cc = kvm_s390_query_ap_config(config);
104645c9b47cSTony Krowiak 
104745c9b47cSTony Krowiak 		if (cc)
104845c9b47cSTony Krowiak 			pr_err("PQAP(QCI) failed with cc=%d", cc);
104945c9b47cSTony Krowiak 		else
105045c9b47cSTony Krowiak 			return config[0] & 0x40;
105145c9b47cSTony Krowiak 	}
105245c9b47cSTony Krowiak 
105345c9b47cSTony Krowiak 	return 0;
105445c9b47cSTony Krowiak }
105545c9b47cSTony Krowiak 
105645c9b47cSTony Krowiak static void kvm_s390_set_crycb_format(struct kvm *kvm)
105745c9b47cSTony Krowiak {
105845c9b47cSTony Krowiak 	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
105945c9b47cSTony Krowiak 
106045c9b47cSTony Krowiak 	if (kvm_s390_apxa_installed())
106145c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
106245c9b47cSTony Krowiak 	else
106345c9b47cSTony Krowiak 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
106445c9b47cSTony Krowiak }
106545c9b47cSTony Krowiak 
10669d8d5786SMichael Mueller static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
10679d8d5786SMichael Mueller {
10689d8d5786SMichael Mueller 	get_cpu_id(cpu_id);
10699d8d5786SMichael Mueller 	cpu_id->version = 0xff;
10709d8d5786SMichael Mueller }
10719d8d5786SMichael Mueller 
10725102ee87STony Krowiak static int kvm_s390_crypto_init(struct kvm *kvm)
10735102ee87STony Krowiak {
10749d8d5786SMichael Mueller 	if (!test_kvm_facility(kvm, 76))
10755102ee87STony Krowiak 		return 0;
10765102ee87STony Krowiak 
10775102ee87STony Krowiak 	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
10785102ee87STony Krowiak 					 GFP_KERNEL | GFP_DMA);
10795102ee87STony Krowiak 	if (!kvm->arch.crypto.crycb)
10805102ee87STony Krowiak 		return -ENOMEM;
10815102ee87STony Krowiak 
108245c9b47cSTony Krowiak 	kvm_s390_set_crycb_format(kvm);
10835102ee87STony Krowiak 
1084ed6f76b4STony Krowiak 	/* Enable AES/DEA protected key functions by default */
1085ed6f76b4STony Krowiak 	kvm->arch.crypto.aes_kw = 1;
1086ed6f76b4STony Krowiak 	kvm->arch.crypto.dea_kw = 1;
1087ed6f76b4STony Krowiak 	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1088ed6f76b4STony Krowiak 			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1089ed6f76b4STony Krowiak 	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1090ed6f76b4STony Krowiak 			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
1091a374e892STony Krowiak 
10925102ee87STony Krowiak 	return 0;
10935102ee87STony Krowiak }
10945102ee87STony Krowiak 
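/*
 * VM creation (KVM_CREATE_VM): after enabling SIE for the host process,
 * allocate the SCA and stagger it within its page (the offset advances
 * by 16 bytes per VM, wrapping at 0x7f0), register a per-VM debug area
 * named after the creating PID, set up the one-page facility mask/list,
 * record the CPU id and the IBC reported by SCLP, and initialize the
 * crypto control block.
 */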
1095e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
1096b0c632dbSHeiko Carstens {
10979d8d5786SMichael Mueller 	int i, rc;
1098b0c632dbSHeiko Carstens 	char debug_name[16];
1099f6c137ffSChristian Borntraeger 	static unsigned long sca_offset;
1100b0c632dbSHeiko Carstens 
1101e08b9637SCarsten Otte 	rc = -EINVAL;
1102e08b9637SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
1103e08b9637SCarsten Otte 	if (type & ~KVM_VM_S390_UCONTROL)
1104e08b9637SCarsten Otte 		goto out_err;
1105e08b9637SCarsten Otte 	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
1106e08b9637SCarsten Otte 		goto out_err;
1107e08b9637SCarsten Otte #else
1108e08b9637SCarsten Otte 	if (type)
1109e08b9637SCarsten Otte 		goto out_err;
1110e08b9637SCarsten Otte #endif
1111e08b9637SCarsten Otte 
1112b0c632dbSHeiko Carstens 	rc = s390_enable_sie();
1113b0c632dbSHeiko Carstens 	if (rc)
1114d89f5effSJan Kiszka 		goto out_err;
1115b0c632dbSHeiko Carstens 
1116b290411aSCarsten Otte 	rc = -ENOMEM;
1117b290411aSCarsten Otte 
1118b0c632dbSHeiko Carstens 	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
1119b0c632dbSHeiko Carstens 	if (!kvm->arch.sca)
1120d89f5effSJan Kiszka 		goto out_err;
1121f6c137ffSChristian Borntraeger 	spin_lock(&kvm_lock);
1122f6c137ffSChristian Borntraeger 	sca_offset = (sca_offset + 16) & 0x7f0;
1123f6c137ffSChristian Borntraeger 	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
1124f6c137ffSChristian Borntraeger 	spin_unlock(&kvm_lock);
1125b0c632dbSHeiko Carstens 
1126b0c632dbSHeiko Carstens 	sprintf(debug_name, "kvm-%u", current->pid);
1127b0c632dbSHeiko Carstens 
11281cb9cf72SChristian Borntraeger 	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
1129b0c632dbSHeiko Carstens 	if (!kvm->arch.dbf)
113040f5b735SDominik Dingel 		goto out_err;
1131b0c632dbSHeiko Carstens 
11329d8d5786SMichael Mueller 	/*
11339d8d5786SMichael Mueller 	 * The architectural maximum number of facility bits is 16 kbit. To
11349d8d5786SMichael Mueller 	 * store this amount, 2 kbyte of memory is required. Thus we need a
1135981467c9SMichael Mueller 	 * full page to hold the guest facility list (arch.model.fac->list)
1136981467c9SMichael Mueller 	 * and the facility mask (arch.model.fac->mask). The address of this
11379d8d5786SMichael Mueller 	 * page must fit into 31 bits and be word aligned.
11389d8d5786SMichael Mueller 	 */
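	/*
	 * Worked out (illustrative): 16 kbit = 2048 bytes for the facility
	 * mask plus 2048 bytes for the facility list = 4096 bytes, i.e.
	 * exactly one 4 KB page, which is why the single get_zeroed_page()
	 * below is sufficient for both.
	 */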
11399d8d5786SMichael Mueller 	kvm->arch.model.fac =
1140981467c9SMichael Mueller 		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
11419d8d5786SMichael Mueller 	if (!kvm->arch.model.fac)
114240f5b735SDominik Dingel 		goto out_err;
11439d8d5786SMichael Mueller 
1144fb5bf93fSMichael Mueller 	/* Populate the facility mask initially. */
1145981467c9SMichael Mueller 	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
114694422ee8SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
11479d8d5786SMichael Mueller 	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
11489d8d5786SMichael Mueller 		if (i < kvm_s390_fac_list_mask_size())
1149981467c9SMichael Mueller 			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
11509d8d5786SMichael Mueller 		else
1151981467c9SMichael Mueller 			kvm->arch.model.fac->mask[i] = 0UL;
11529d8d5786SMichael Mueller 	}
11539d8d5786SMichael Mueller 
1154981467c9SMichael Mueller 	/* Populate the facility list initially. */
1155981467c9SMichael Mueller 	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
1156981467c9SMichael Mueller 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
1157981467c9SMichael Mueller 
11589d8d5786SMichael Mueller 	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
115937c5f6c8SDavid Hildenbrand 	kvm->arch.model.ibc = sclp.ibc & 0x0fff;
11609d8d5786SMichael Mueller 
11615102ee87STony Krowiak 	if (kvm_s390_crypto_init(kvm) < 0)
116240f5b735SDominik Dingel 		goto out_err;
11635102ee87STony Krowiak 
1164ba5c1e9bSCarsten Otte 	spin_lock_init(&kvm->arch.float_int.lock);
11656d3da241SJens Freimann 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
11666d3da241SJens Freimann 		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
11678a242234SHeiko Carstens 	init_waitqueue_head(&kvm->arch.ipte_wq);
1168a6b7e459SThomas Huth 	mutex_init(&kvm->arch.ipte_mutex);
1169ba5c1e9bSCarsten Otte 
1170b0c632dbSHeiko Carstens 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
117178f26131SChristian Borntraeger 	VM_EVENT(kvm, 3, "vm created with type %lu", type);
1172b0c632dbSHeiko Carstens 
1173e08b9637SCarsten Otte 	if (type & KVM_VM_S390_UCONTROL) {
1174e08b9637SCarsten Otte 		kvm->arch.gmap = NULL;
1175e08b9637SCarsten Otte 	} else {
11760349985aSChristian Borntraeger 		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
1177598841caSCarsten Otte 		if (!kvm->arch.gmap)
117840f5b735SDominik Dingel 			goto out_err;
11792c70fe44SChristian Borntraeger 		kvm->arch.gmap->private = kvm;
118024eb3a82SDominik Dingel 		kvm->arch.gmap->pfault_enabled = 0;
1181e08b9637SCarsten Otte 	}
1182fa6b7fe9SCornelia Huck 
1183fa6b7fe9SCornelia Huck 	kvm->arch.css_support = 0;
118484223598SCornelia Huck 	kvm->arch.use_irqchip = 0;
118572f25020SJason J. Herne 	kvm->arch.epoch = 0;
1186fa6b7fe9SCornelia Huck 
11878ad35755SDavid Hildenbrand 	spin_lock_init(&kvm->arch.start_stop_lock);
118878f26131SChristian Borntraeger 	KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid);
11898ad35755SDavid Hildenbrand 
1190d89f5effSJan Kiszka 	return 0;
1191d89f5effSJan Kiszka out_err:
119240f5b735SDominik Dingel 	kfree(kvm->arch.crypto.crycb);
119340f5b735SDominik Dingel 	free_page((unsigned long)kvm->arch.model.fac);
119440f5b735SDominik Dingel 	debug_unregister(kvm->arch.dbf);
119540f5b735SDominik Dingel 	free_page((unsigned long)(kvm->arch.sca));
119678f26131SChristian Borntraeger 	KVM_EVENT(3, "creation of vm failed: %d", rc);
1197d89f5effSJan Kiszka 	return rc;
1198b0c632dbSHeiko Carstens }
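
/*
 * Minimal userspace sketch (illustrative, not part of this file) of the two
 * VM types kvm_arch_init_vm() accepts: a regular VM (type 0) and a
 * user-controlled VM (KVM_VM_S390_UCONTROL, which requires CAP_SYS_ADMIN).
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int create_s390_vm(int ucontrol)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);

	if (kvm_fd < 0)
		return -1;
	/* the type argument is passed through to kvm_arch_init_vm() above */
	return ioctl(kvm_fd, KVM_CREATE_VM,
		     ucontrol ? KVM_VM_S390_UCONTROL : 0UL);
}
#endif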
1199b0c632dbSHeiko Carstens 
1200d329c035SChristian Borntraeger void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1201d329c035SChristian Borntraeger {
1202d329c035SChristian Borntraeger 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
1203ade38c31SCornelia Huck 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
120467335e63SChristian Borntraeger 	kvm_s390_clear_local_irqs(vcpu);
12053c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
120658f9460bSCarsten Otte 	if (!kvm_is_ucontrol(vcpu->kvm)) {
120758f9460bSCarsten Otte 		clear_bit(63 - vcpu->vcpu_id,
120858f9460bSCarsten Otte 			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
1209abf4a71eSCarsten Otte 		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
1210abf4a71eSCarsten Otte 		    (__u64) vcpu->arch.sie_block)
1211abf4a71eSCarsten Otte 			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
121258f9460bSCarsten Otte 	}
1213abf4a71eSCarsten Otte 	smp_mb();
121427e0393fSCarsten Otte 
121527e0393fSCarsten Otte 	if (kvm_is_ucontrol(vcpu->kvm))
121627e0393fSCarsten Otte 		gmap_free(vcpu->arch.gmap);
121727e0393fSCarsten Otte 
1218e6db1d61SDominik Dingel 	if (vcpu->kvm->arch.use_cmma)
1219b31605c1SDominik Dingel 		kvm_s390_vcpu_unsetup_cmma(vcpu);
1220d329c035SChristian Borntraeger 	free_page((unsigned long)(vcpu->arch.sie_block));
1221b31288faSKonstantin Weitz 
12226692cef3SChristian Borntraeger 	kvm_vcpu_uninit(vcpu);
1223b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
1224d329c035SChristian Borntraeger }
1225d329c035SChristian Borntraeger 
1226d329c035SChristian Borntraeger static void kvm_free_vcpus(struct kvm *kvm)
1227d329c035SChristian Borntraeger {
1228d329c035SChristian Borntraeger 	unsigned int i;
1229988a2caeSGleb Natapov 	struct kvm_vcpu *vcpu;
1230d329c035SChristian Borntraeger 
1231988a2caeSGleb Natapov 	kvm_for_each_vcpu(i, vcpu, kvm)
1232988a2caeSGleb Natapov 		kvm_arch_vcpu_destroy(vcpu);
1233988a2caeSGleb Natapov 
1234988a2caeSGleb Natapov 	mutex_lock(&kvm->lock);
1235988a2caeSGleb Natapov 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
1236d329c035SChristian Borntraeger 		kvm->vcpus[i] = NULL;
1237988a2caeSGleb Natapov 
1238988a2caeSGleb Natapov 	atomic_set(&kvm->online_vcpus, 0);
1239988a2caeSGleb Natapov 	mutex_unlock(&kvm->lock);
1240d329c035SChristian Borntraeger }
1241d329c035SChristian Borntraeger 
1242b0c632dbSHeiko Carstens void kvm_arch_destroy_vm(struct kvm *kvm)
1243b0c632dbSHeiko Carstens {
1244d329c035SChristian Borntraeger 	kvm_free_vcpus(kvm);
12459d8d5786SMichael Mueller 	free_page((unsigned long)kvm->arch.model.fac);
1246b0c632dbSHeiko Carstens 	free_page((unsigned long)(kvm->arch.sca));
1247d329c035SChristian Borntraeger 	debug_unregister(kvm->arch.dbf);
12485102ee87STony Krowiak 	kfree(kvm->arch.crypto.crycb);
124927e0393fSCarsten Otte 	if (!kvm_is_ucontrol(kvm))
1250598841caSCarsten Otte 		gmap_free(kvm->arch.gmap);
1251841b91c5SCornelia Huck 	kvm_s390_destroy_adapters(kvm);
125267335e63SChristian Borntraeger 	kvm_s390_clear_float_irqs(kvm);
125378f26131SChristian Borntraeger 	KVM_EVENT(3, "vm 0x%p destroyed", kvm);
1254b0c632dbSHeiko Carstens }
1255b0c632dbSHeiko Carstens 
1256b0c632dbSHeiko Carstens /* Section: vcpu related */
1257dafd032aSDominik Dingel static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1258b0c632dbSHeiko Carstens {
1259c6c956b8SMartin Schwidefsky 	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
126027e0393fSCarsten Otte 	if (!vcpu->arch.gmap)
126127e0393fSCarsten Otte 		return -ENOMEM;
12622c70fe44SChristian Borntraeger 	vcpu->arch.gmap->private = vcpu->kvm;
1263dafd032aSDominik Dingel 
126427e0393fSCarsten Otte 	return 0;
126527e0393fSCarsten Otte }
126627e0393fSCarsten Otte 
1267dafd032aSDominik Dingel int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1268dafd032aSDominik Dingel {
1269dafd032aSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1270dafd032aSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
127159674c1aSChristian Borntraeger 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
127259674c1aSChristian Borntraeger 				    KVM_SYNC_GPRS |
12739eed0735SChristian Borntraeger 				    KVM_SYNC_ACRS |
1274b028ee3eSDavid Hildenbrand 				    KVM_SYNC_CRS |
1275b028ee3eSDavid Hildenbrand 				    KVM_SYNC_ARCH0 |
1276b028ee3eSDavid Hildenbrand 				    KVM_SYNC_PFAULT;
127768c55750SEric Farman 	if (test_kvm_facility(vcpu->kvm, 129))
127868c55750SEric Farman 		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
1279dafd032aSDominik Dingel 
1280dafd032aSDominik Dingel 	if (kvm_is_ucontrol(vcpu->kvm))
1281dafd032aSDominik Dingel 		return __kvm_ucontrol_vcpu_init(vcpu);
1282dafd032aSDominik Dingel 
1283b0c632dbSHeiko Carstens 	return 0;
1284b0c632dbSHeiko Carstens }
1285b0c632dbSHeiko Carstens 
1286b0c632dbSHeiko Carstens void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1287b0c632dbSHeiko Carstens {
12884725c860SMartin Schwidefsky 	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
128918280d8bSMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 129))
129068c55750SEric Farman 		save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
129168c55750SEric Farman 	else
12924725c860SMartin Schwidefsky 		save_fp_regs(vcpu->arch.host_fpregs.fprs);
1293b0c632dbSHeiko Carstens 	save_access_regs(vcpu->arch.host_acrs);
129418280d8bSMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 129)) {
129568c55750SEric Farman 		restore_fp_ctl(&vcpu->run->s.regs.fpc);
129668c55750SEric Farman 		restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
129768c55750SEric Farman 	} else {
12984725c860SMartin Schwidefsky 		restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
12994725c860SMartin Schwidefsky 		restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
130068c55750SEric Farman 	}
130159674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
1302480e5926SChristian Borntraeger 	gmap_enable(vcpu->arch.gmap);
13039e6dabefSCornelia Huck 	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
1304b0c632dbSHeiko Carstens }
1305b0c632dbSHeiko Carstens 
1306b0c632dbSHeiko Carstens void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1307b0c632dbSHeiko Carstens {
13089e6dabefSCornelia Huck 	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
1309480e5926SChristian Borntraeger 	gmap_disable(vcpu->arch.gmap);
131018280d8bSMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 129)) {
131168c55750SEric Farman 		save_fp_ctl(&vcpu->run->s.regs.fpc);
131268c55750SEric Farman 		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
131368c55750SEric Farman 	} else {
13144725c860SMartin Schwidefsky 		save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
13154725c860SMartin Schwidefsky 		save_fp_regs(vcpu->arch.guest_fpregs.fprs);
131668c55750SEric Farman 	}
131759674c1aSChristian Borntraeger 	save_access_regs(vcpu->run->s.regs.acrs);
13184725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
131918280d8bSMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 129))
132068c55750SEric Farman 		restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
132168c55750SEric Farman 	else
13224725c860SMartin Schwidefsky 		restore_fp_regs(vcpu->arch.host_fpregs.fprs);
1323b0c632dbSHeiko Carstens 	restore_access_regs(vcpu->arch.host_acrs);
1324b0c632dbSHeiko Carstens }
1325b0c632dbSHeiko Carstens 
1326b0c632dbSHeiko Carstens static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
1327b0c632dbSHeiko Carstens {
1328b0c632dbSHeiko Carstens 	/* this equals initial cpu reset in the POP, but we don't switch to ESA */
1329b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.mask = 0UL;
1330b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gpsw.addr = 0UL;
13318d26cf7bSChristian Borntraeger 	kvm_s390_set_prefix(vcpu, 0);
1332b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->cputm     = 0UL;
1333b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->ckc       = 0UL;
1334b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->todpr     = 0;
1335b0c632dbSHeiko Carstens 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
1336b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
1337b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
1338b0c632dbSHeiko Carstens 	vcpu->arch.guest_fpregs.fpc = 0;
1339b0c632dbSHeiko Carstens 	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
1340b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->gbea = 1;
1341672550fbSChristian Borntraeger 	vcpu->arch.sie_block->pp = 0;
13423c038e6bSDominik Dingel 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
13433c038e6bSDominik Dingel 	kvm_clear_async_pf_completion_queue(vcpu);
13446352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
13456852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
13462ed10cc1SJens Freimann 	kvm_s390_clear_local_irqs(vcpu);
1347b0c632dbSHeiko Carstens }
1348b0c632dbSHeiko Carstens 
134931928aa5SDominik Dingel void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
135042897d86SMarcelo Tosatti {
135172f25020SJason J. Herne 	mutex_lock(&vcpu->kvm->lock);
1352fdf03650SFan Zhang 	preempt_disable();
135372f25020SJason J. Herne 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
1354fdf03650SFan Zhang 	preempt_enable();
135572f25020SJason J. Herne 	mutex_unlock(&vcpu->kvm->lock);
1356dafd032aSDominik Dingel 	if (!kvm_is_ucontrol(vcpu->kvm))
1357dafd032aSDominik Dingel 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
135842897d86SMarcelo Tosatti }
135942897d86SMarcelo Tosatti 
13605102ee87STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
13615102ee87STony Krowiak {
13629d8d5786SMichael Mueller 	if (!test_kvm_facility(vcpu->kvm, 76))
13635102ee87STony Krowiak 		return;
13645102ee87STony Krowiak 
1365a374e892STony Krowiak 	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1366a374e892STony Krowiak 
1367a374e892STony Krowiak 	if (vcpu->kvm->arch.crypto.aes_kw)
1368a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1369a374e892STony Krowiak 	if (vcpu->kvm->arch.crypto.dea_kw)
1370a374e892STony Krowiak 		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1371a374e892STony Krowiak 
13725102ee87STony Krowiak 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
13735102ee87STony Krowiak }
13745102ee87STony Krowiak 
1375b31605c1SDominik Dingel void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1376b31605c1SDominik Dingel {
1377b31605c1SDominik Dingel 	free_page(vcpu->arch.sie_block->cbrlo);
1378b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = 0;
1379b31605c1SDominik Dingel }
1380b31605c1SDominik Dingel 
1381b31605c1SDominik Dingel int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1382b31605c1SDominik Dingel {
1383b31605c1SDominik Dingel 	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1384b31605c1SDominik Dingel 	if (!vcpu->arch.sie_block->cbrlo)
1385b31605c1SDominik Dingel 		return -ENOMEM;
1386b31605c1SDominik Dingel 
1387b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 |= 0x80;
1388b31605c1SDominik Dingel 	vcpu->arch.sie_block->ecb2 &= ~0x08;
1389b31605c1SDominik Dingel 	return 0;
1390b31605c1SDominik Dingel }
1391b31605c1SDominik Dingel 
139291520f1aSMichael Mueller static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
139391520f1aSMichael Mueller {
139491520f1aSMichael Mueller 	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
139591520f1aSMichael Mueller 
139691520f1aSMichael Mueller 	vcpu->arch.cpu_id = model->cpu_id;
139791520f1aSMichael Mueller 	vcpu->arch.sie_block->ibc = model->ibc;
139891520f1aSMichael Mueller 	vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
139991520f1aSMichael Mueller }
140091520f1aSMichael Mueller 
1401b0c632dbSHeiko Carstens int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1402b0c632dbSHeiko Carstens {
1403b31605c1SDominik Dingel 	int rc = 0;
1404b31288faSKonstantin Weitz 
14059e6dabefSCornelia Huck 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
14069e6dabefSCornelia Huck 						    CPUSTAT_SM |
1407a4a4f191SGuenther Hutzl 						    CPUSTAT_STOPPED);
1408a4a4f191SGuenther Hutzl 
140953df84f8SGuenther Hutzl 	if (test_kvm_facility(vcpu->kvm, 78))
141053df84f8SGuenther Hutzl 		atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
141153df84f8SGuenther Hutzl 	else if (test_kvm_facility(vcpu->kvm, 8))
1412a4a4f191SGuenther Hutzl 		atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
1413a4a4f191SGuenther Hutzl 
141491520f1aSMichael Mueller 	kvm_s390_vcpu_setup_model(vcpu);
141591520f1aSMichael Mueller 
1416fc34531dSChristian Borntraeger 	vcpu->arch.sie_block->ecb   = 6;
14179d8d5786SMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
14187feb6bb8SMichael Mueller 		vcpu->arch.sie_block->ecb |= 0x10;
14197feb6bb8SMichael Mueller 
142069d0d3a3SChristian Borntraeger 	vcpu->arch.sie_block->ecb2  = 8;
1421ea5f4969SDavid Hildenbrand 	vcpu->arch.sie_block->eca   = 0xC1002000U;
142237c5f6c8SDavid Hildenbrand 	if (sclp.has_siif)
1423217a4406SHeiko Carstens 		vcpu->arch.sie_block->eca |= 1;
142437c5f6c8SDavid Hildenbrand 	if (sclp.has_sigpif)
1425ea5f4969SDavid Hildenbrand 		vcpu->arch.sie_block->eca |= 0x10000000U;
142618280d8bSMichael Mueller 	if (test_kvm_facility(vcpu->kvm, 129)) {
142713211ea7SEric Farman 		vcpu->arch.sie_block->eca |= 0x00020000;
142813211ea7SEric Farman 		vcpu->arch.sie_block->ecd |= 0x20000000;
142913211ea7SEric Farman 	}
1430492d8642SThomas Huth 	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
14315a5e6536SMatthew Rosato 
1432e6db1d61SDominik Dingel 	if (vcpu->kvm->arch.use_cmma) {
1433b31605c1SDominik Dingel 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
1434b31605c1SDominik Dingel 		if (rc)
1435b31605c1SDominik Dingel 			return rc;
1436b31288faSKonstantin Weitz 	}
14370ac96cafSDavid Hildenbrand 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1438ca872302SChristian Borntraeger 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
14399d8d5786SMichael Mueller 
14405102ee87STony Krowiak 	kvm_s390_vcpu_crypto_setup(vcpu);
14415102ee87STony Krowiak 
1442b31605c1SDominik Dingel 	return rc;
1443b0c632dbSHeiko Carstens }
1444b0c632dbSHeiko Carstens 
1445b0c632dbSHeiko Carstens struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1446b0c632dbSHeiko Carstens 				      unsigned int id)
1447b0c632dbSHeiko Carstens {
14484d47555aSCarsten Otte 	struct kvm_vcpu *vcpu;
14497feb6bb8SMichael Mueller 	struct sie_page *sie_page;
14504d47555aSCarsten Otte 	int rc = -EINVAL;
1451b0c632dbSHeiko Carstens 
14524d47555aSCarsten Otte 	if (id >= KVM_MAX_VCPUS)
14534d47555aSCarsten Otte 		goto out;
14544d47555aSCarsten Otte 
14554d47555aSCarsten Otte 	rc = -ENOMEM;
14564d47555aSCarsten Otte 
1457b110feafSMichael Mueller 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
1458b0c632dbSHeiko Carstens 	if (!vcpu)
14594d47555aSCarsten Otte 		goto out;
1460b0c632dbSHeiko Carstens 
14617feb6bb8SMichael Mueller 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
14627feb6bb8SMichael Mueller 	if (!sie_page)
1463b0c632dbSHeiko Carstens 		goto out_free_cpu;
1464b0c632dbSHeiko Carstens 
14657feb6bb8SMichael Mueller 	vcpu->arch.sie_block = &sie_page->sie_block;
14667feb6bb8SMichael Mueller 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
146768c55750SEric Farman 	vcpu->arch.host_vregs = &sie_page->vregs;
14687feb6bb8SMichael Mueller 
1469b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icpua = id;
147058f9460bSCarsten Otte 	if (!kvm_is_ucontrol(kvm)) {
147158f9460bSCarsten Otte 		if (!kvm->arch.sca) {
147258f9460bSCarsten Otte 			WARN_ON_ONCE(1);
147358f9460bSCarsten Otte 			goto out_free_cpu;
147458f9460bSCarsten Otte 		}
1475abf4a71eSCarsten Otte 		if (!kvm->arch.sca->cpu[id].sda)
147658f9460bSCarsten Otte 			kvm->arch.sca->cpu[id].sda =
147758f9460bSCarsten Otte 				(__u64) vcpu->arch.sie_block;
147858f9460bSCarsten Otte 		vcpu->arch.sie_block->scaoh =
147958f9460bSCarsten Otte 			(__u32)(((__u64)kvm->arch.sca) >> 32);
1480b0c632dbSHeiko Carstens 		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
1481fc34531dSChristian Borntraeger 		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
148258f9460bSCarsten Otte 	}
1483b0c632dbSHeiko Carstens 
1484ba5c1e9bSCarsten Otte 	spin_lock_init(&vcpu->arch.local_int.lock);
1485ba5c1e9bSCarsten Otte 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
1486d0321a24SChristian Borntraeger 	vcpu->arch.local_int.wq = &vcpu->wq;
14875288fbf0SChristian Borntraeger 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
1488ba5c1e9bSCarsten Otte 
1489b0c632dbSHeiko Carstens 	rc = kvm_vcpu_init(vcpu, kvm, id);
1490b0c632dbSHeiko Carstens 	if (rc)
14917b06bf2fSWei Yongjun 		goto out_free_sie_block;
1492b0c632dbSHeiko Carstens 	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
1493b0c632dbSHeiko Carstens 		 vcpu->arch.sie_block);
1494ade38c31SCornelia Huck 	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
1495b0c632dbSHeiko Carstens 
1496b0c632dbSHeiko Carstens 	return vcpu;
14977b06bf2fSWei Yongjun out_free_sie_block:
14987b06bf2fSWei Yongjun 	free_page((unsigned long)(vcpu->arch.sie_block));
1499b0c632dbSHeiko Carstens out_free_cpu:
1500b110feafSMichael Mueller 	kmem_cache_free(kvm_vcpu_cache, vcpu);
15014d47555aSCarsten Otte out:
1502b0c632dbSHeiko Carstens 	return ERR_PTR(rc);
1503b0c632dbSHeiko Carstens }
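
/*
 * Illustrative userspace counterpart (vm_fd is assumed): the id passed to
 * KVM_CREATE_VCPU becomes the 'id' argument of kvm_arch_vcpu_create() above
 * and must be below KVM_MAX_VCPUS.
 */
#if 0
int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0 /* id */);
#endif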
1504b0c632dbSHeiko Carstens 
1505b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1506b0c632dbSHeiko Carstens {
15079a022067SDavid Hildenbrand 	return kvm_s390_vcpu_has_irq(vcpu, 0);
1508b0c632dbSHeiko Carstens }
1509b0c632dbSHeiko Carstens 
151027406cd5SChristian Borntraeger void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
151149b99e1eSChristian Borntraeger {
151249b99e1eSChristian Borntraeger 	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
151361a6df54SDavid Hildenbrand 	exit_sie(vcpu);
151449b99e1eSChristian Borntraeger }
151549b99e1eSChristian Borntraeger 
151627406cd5SChristian Borntraeger void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
151749b99e1eSChristian Borntraeger {
151849b99e1eSChristian Borntraeger 	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
151949b99e1eSChristian Borntraeger }
152049b99e1eSChristian Borntraeger 
15218e236546SChristian Borntraeger static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
15228e236546SChristian Borntraeger {
15238e236546SChristian Borntraeger 	atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
152461a6df54SDavid Hildenbrand 	exit_sie(vcpu);
15258e236546SChristian Borntraeger }
15268e236546SChristian Borntraeger 
15278e236546SChristian Borntraeger static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
15288e236546SChristian Borntraeger {
15298e236546SChristian Borntraeger 	atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
15308e236546SChristian Borntraeger }
15318e236546SChristian Borntraeger 
153249b99e1eSChristian Borntraeger /*
153349b99e1eSChristian Borntraeger  * Kick a guest cpu out of SIE and wait until SIE is not running.
153449b99e1eSChristian Borntraeger  * If the CPU is not running (e.g. waiting as idle) the function will
153549b99e1eSChristian Borntraeger  * return immediately. */
153649b99e1eSChristian Borntraeger void exit_sie(struct kvm_vcpu *vcpu)
153749b99e1eSChristian Borntraeger {
153849b99e1eSChristian Borntraeger 	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
153949b99e1eSChristian Borntraeger 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
154049b99e1eSChristian Borntraeger 		cpu_relax();
154149b99e1eSChristian Borntraeger }
154249b99e1eSChristian Borntraeger 
15438e236546SChristian Borntraeger /* Kick a guest cpu out of SIE to process a request synchronously */
15448e236546SChristian Borntraeger void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
154549b99e1eSChristian Borntraeger {
15468e236546SChristian Borntraeger 	kvm_make_request(req, vcpu);
15478e236546SChristian Borntraeger 	kvm_s390_vcpu_request(vcpu);
154849b99e1eSChristian Borntraeger }
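
/*
 * Illustrative use of the helper above (same pattern as the gmap notifier
 * below): queue a request and kick the vcpu out of SIE so the request is
 * handled before the next guest instruction.
 */
#if 0
kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
#endif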
154949b99e1eSChristian Borntraeger 
15502c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
15512c70fe44SChristian Borntraeger {
15522c70fe44SChristian Borntraeger 	int i;
15532c70fe44SChristian Borntraeger 	struct kvm *kvm = gmap->private;
15542c70fe44SChristian Borntraeger 	struct kvm_vcpu *vcpu;
15552c70fe44SChristian Borntraeger 
15562c70fe44SChristian Borntraeger 	kvm_for_each_vcpu(i, vcpu, kvm) {
15572c70fe44SChristian Borntraeger 		/* match against both prefix pages */
1558fda902cbSMichael Mueller 		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
15592c70fe44SChristian Borntraeger 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
15608e236546SChristian Borntraeger 			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
15612c70fe44SChristian Borntraeger 		}
15622c70fe44SChristian Borntraeger 	}
15632c70fe44SChristian Borntraeger }
15642c70fe44SChristian Borntraeger 
1565b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1566b6d33834SChristoffer Dall {
1567b6d33834SChristoffer Dall 	/* kvm common code refers to this, but never calls it */
1568b6d33834SChristoffer Dall 	BUG();
1569b6d33834SChristoffer Dall 	return 0;
1570b6d33834SChristoffer Dall }
1571b6d33834SChristoffer Dall 
157214eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
157314eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
157414eebd91SCarsten Otte {
157514eebd91SCarsten Otte 	int r = -EINVAL;
157614eebd91SCarsten Otte 
157714eebd91SCarsten Otte 	switch (reg->id) {
157829b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
157929b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->todpr,
158029b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
158129b7c71bSCarsten Otte 		break;
158229b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
158329b7c71bSCarsten Otte 		r = put_user(vcpu->arch.sie_block->epoch,
158429b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
158529b7c71bSCarsten Otte 		break;
158646a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
158746a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->cputm,
158846a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
158946a6dd1cSJason J. herne 		break;
159046a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
159146a6dd1cSJason J. herne 		r = put_user(vcpu->arch.sie_block->ckc,
159246a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
159346a6dd1cSJason J. herne 		break;
1594536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
1595536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_token,
1596536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1597536336c2SDominik Dingel 		break;
1598536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
1599536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_compare,
1600536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1601536336c2SDominik Dingel 		break;
1602536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
1603536336c2SDominik Dingel 		r = put_user(vcpu->arch.pfault_select,
1604536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1605536336c2SDominik Dingel 		break;
1606672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
1607672550fbSChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->pp,
1608672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
1609672550fbSChristian Borntraeger 		break;
1610afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
1611afa45ff5SChristian Borntraeger 		r = put_user(vcpu->arch.sie_block->gbea,
1612afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
1613afa45ff5SChristian Borntraeger 		break;
161414eebd91SCarsten Otte 	default:
161514eebd91SCarsten Otte 		break;
161614eebd91SCarsten Otte 	}
161714eebd91SCarsten Otte 
161814eebd91SCarsten Otte 	return r;
161914eebd91SCarsten Otte }
162014eebd91SCarsten Otte 
162114eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
162214eebd91SCarsten Otte 					   struct kvm_one_reg *reg)
162314eebd91SCarsten Otte {
162414eebd91SCarsten Otte 	int r = -EINVAL;
162514eebd91SCarsten Otte 
162614eebd91SCarsten Otte 	switch (reg->id) {
162729b7c71bSCarsten Otte 	case KVM_REG_S390_TODPR:
162829b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->todpr,
162929b7c71bSCarsten Otte 			     (u32 __user *)reg->addr);
163029b7c71bSCarsten Otte 		break;
163129b7c71bSCarsten Otte 	case KVM_REG_S390_EPOCHDIFF:
163229b7c71bSCarsten Otte 		r = get_user(vcpu->arch.sie_block->epoch,
163329b7c71bSCarsten Otte 			     (u64 __user *)reg->addr);
163429b7c71bSCarsten Otte 		break;
163546a6dd1cSJason J. herne 	case KVM_REG_S390_CPU_TIMER:
163646a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->cputm,
163746a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
163846a6dd1cSJason J. herne 		break;
163946a6dd1cSJason J. herne 	case KVM_REG_S390_CLOCK_COMP:
164046a6dd1cSJason J. herne 		r = get_user(vcpu->arch.sie_block->ckc,
164146a6dd1cSJason J. herne 			     (u64 __user *)reg->addr);
164246a6dd1cSJason J. herne 		break;
1643536336c2SDominik Dingel 	case KVM_REG_S390_PFTOKEN:
1644536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_token,
1645536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
16469fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
16479fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
1648536336c2SDominik Dingel 		break;
1649536336c2SDominik Dingel 	case KVM_REG_S390_PFCOMPARE:
1650536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_compare,
1651536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1652536336c2SDominik Dingel 		break;
1653536336c2SDominik Dingel 	case KVM_REG_S390_PFSELECT:
1654536336c2SDominik Dingel 		r = get_user(vcpu->arch.pfault_select,
1655536336c2SDominik Dingel 			     (u64 __user *)reg->addr);
1656536336c2SDominik Dingel 		break;
1657672550fbSChristian Borntraeger 	case KVM_REG_S390_PP:
1658672550fbSChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->pp,
1659672550fbSChristian Borntraeger 			     (u64 __user *)reg->addr);
1660672550fbSChristian Borntraeger 		break;
1661afa45ff5SChristian Borntraeger 	case KVM_REG_S390_GBEA:
1662afa45ff5SChristian Borntraeger 		r = get_user(vcpu->arch.sie_block->gbea,
1663afa45ff5SChristian Borntraeger 			     (u64 __user *)reg->addr);
1664afa45ff5SChristian Borntraeger 		break;
166514eebd91SCarsten Otte 	default:
166614eebd91SCarsten Otte 		break;
166714eebd91SCarsten Otte 	}
166814eebd91SCarsten Otte 
166914eebd91SCarsten Otte 	return r;
167014eebd91SCarsten Otte }
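
/*
 * Illustrative userspace use of the one-reg interface handled above
 * (vcpu_fd and error handling are assumed).
 */
#if 0
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int get_cpu_timer(int vcpu_fd, __u64 *cputm)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_S390_CPU_TIMER,
		.addr = (__u64)(unsigned long)cputm,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
#endif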
1671b6d33834SChristoffer Dall 
1672b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
1673b0c632dbSHeiko Carstens {
1674b0c632dbSHeiko Carstens 	kvm_s390_vcpu_initial_reset(vcpu);
1675b0c632dbSHeiko Carstens 	return 0;
1676b0c632dbSHeiko Carstens }
1677b0c632dbSHeiko Carstens 
1678b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1679b0c632dbSHeiko Carstens {
16805a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
1681b0c632dbSHeiko Carstens 	return 0;
1682b0c632dbSHeiko Carstens }
1683b0c632dbSHeiko Carstens 
1684b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1685b0c632dbSHeiko Carstens {
16865a32c1afSChristian Borntraeger 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
1687b0c632dbSHeiko Carstens 	return 0;
1688b0c632dbSHeiko Carstens }
1689b0c632dbSHeiko Carstens 
1690b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1691b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
1692b0c632dbSHeiko Carstens {
169359674c1aSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
1694b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
169559674c1aSChristian Borntraeger 	restore_access_regs(vcpu->run->s.regs.acrs);
1696b0c632dbSHeiko Carstens 	return 0;
1697b0c632dbSHeiko Carstens }
1698b0c632dbSHeiko Carstens 
1699b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1700b0c632dbSHeiko Carstens 				  struct kvm_sregs *sregs)
1701b0c632dbSHeiko Carstens {
170259674c1aSChristian Borntraeger 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
1703b0c632dbSHeiko Carstens 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
1704b0c632dbSHeiko Carstens 	return 0;
1705b0c632dbSHeiko Carstens }
1706b0c632dbSHeiko Carstens 
1707b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1708b0c632dbSHeiko Carstens {
17094725c860SMartin Schwidefsky 	if (test_fp_ctl(fpu->fpc))
17104725c860SMartin Schwidefsky 		return -EINVAL;
1711b0c632dbSHeiko Carstens 	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
17124725c860SMartin Schwidefsky 	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
17134725c860SMartin Schwidefsky 	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
17144725c860SMartin Schwidefsky 	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
1715b0c632dbSHeiko Carstens 	return 0;
1716b0c632dbSHeiko Carstens }
1717b0c632dbSHeiko Carstens 
1718b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1719b0c632dbSHeiko Carstens {
1720b0c632dbSHeiko Carstens 	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
1721b0c632dbSHeiko Carstens 	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
1722b0c632dbSHeiko Carstens 	return 0;
1723b0c632dbSHeiko Carstens }
1724b0c632dbSHeiko Carstens 
1725b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1726b0c632dbSHeiko Carstens {
1727b0c632dbSHeiko Carstens 	int rc = 0;
1728b0c632dbSHeiko Carstens 
17297a42fdc2SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
1730b0c632dbSHeiko Carstens 		rc = -EBUSY;
1731d7b0b5ebSCarsten Otte 	else {
1732d7b0b5ebSCarsten Otte 		vcpu->run->psw_mask = psw.mask;
1733d7b0b5ebSCarsten Otte 		vcpu->run->psw_addr = psw.addr;
1734d7b0b5ebSCarsten Otte 	}
1735b0c632dbSHeiko Carstens 	return rc;
1736b0c632dbSHeiko Carstens }
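
/*
 * Illustrative userspace counterpart (vcpu_fd and the PSW values are
 * assumptions): setting the initial PSW of a stopped vcpu; the handler
 * above returns -EBUSY if the vcpu is not stopped.
 */
#if 0
struct kvm_s390_psw psw = {
	.mask = 0x0000000180000000ULL,	/* example: 64-bit addressing mode */
	.addr = 0x10000,		/* illustrative start address */
};
ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
#endif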
1737b0c632dbSHeiko Carstens 
1738b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1739b0c632dbSHeiko Carstens 				  struct kvm_translation *tr)
1740b0c632dbSHeiko Carstens {
1741b0c632dbSHeiko Carstens 	return -EINVAL; /* not implemented yet */
1742b0c632dbSHeiko Carstens }
1743b0c632dbSHeiko Carstens 
174427291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
174527291e21SDavid Hildenbrand 			      KVM_GUESTDBG_USE_HW_BP | \
174627291e21SDavid Hildenbrand 			      KVM_GUESTDBG_ENABLE)
174727291e21SDavid Hildenbrand 
1748d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1749d0bfb940SJan Kiszka 					struct kvm_guest_debug *dbg)
1750b0c632dbSHeiko Carstens {
175127291e21SDavid Hildenbrand 	int rc = 0;
175227291e21SDavid Hildenbrand 
175327291e21SDavid Hildenbrand 	vcpu->guest_debug = 0;
175427291e21SDavid Hildenbrand 	kvm_s390_clear_bp_data(vcpu);
175527291e21SDavid Hildenbrand 
17562de3bfc2SDavid Hildenbrand 	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
175727291e21SDavid Hildenbrand 		return -EINVAL;
175827291e21SDavid Hildenbrand 
175927291e21SDavid Hildenbrand 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
176027291e21SDavid Hildenbrand 		vcpu->guest_debug = dbg->control;
176127291e21SDavid Hildenbrand 		/* enforce guest PER */
176227291e21SDavid Hildenbrand 		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
176327291e21SDavid Hildenbrand 
176427291e21SDavid Hildenbrand 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
176527291e21SDavid Hildenbrand 			rc = kvm_s390_import_bp_data(vcpu, dbg);
176627291e21SDavid Hildenbrand 	} else {
176727291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
176827291e21SDavid Hildenbrand 		vcpu->arch.guestdbg.last_bp = 0;
176927291e21SDavid Hildenbrand 	}
177027291e21SDavid Hildenbrand 
177127291e21SDavid Hildenbrand 	if (rc) {
177227291e21SDavid Hildenbrand 		vcpu->guest_debug = 0;
177327291e21SDavid Hildenbrand 		kvm_s390_clear_bp_data(vcpu);
177427291e21SDavid Hildenbrand 		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
177527291e21SDavid Hildenbrand 	}
177627291e21SDavid Hildenbrand 
177727291e21SDavid Hildenbrand 	return rc;
1778b0c632dbSHeiko Carstens }
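
/*
 * Illustrative userspace counterpart (vcpu_fd assumed): enabling
 * single-stepping, which corresponds to the KVM_GUESTDBG_ENABLE |
 * KVM_GUESTDBG_SINGLESTEP path handled above.
 */
#if 0
struct kvm_guest_debug dbg = {
	.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
};
ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
#endif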
1779b0c632dbSHeiko Carstens 
178062d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
178162d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
178262d9f0dbSMarcelo Tosatti {
17836352e4d2SDavid Hildenbrand 	/* CHECK_STOP and LOAD are not supported yet */
17846352e4d2SDavid Hildenbrand 	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
17856352e4d2SDavid Hildenbrand 				       KVM_MP_STATE_OPERATING;
178662d9f0dbSMarcelo Tosatti }
178762d9f0dbSMarcelo Tosatti 
178862d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
178962d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state)
179062d9f0dbSMarcelo Tosatti {
17916352e4d2SDavid Hildenbrand 	int rc = 0;
17926352e4d2SDavid Hildenbrand 
17936352e4d2SDavid Hildenbrand 	/* user space knows about this interface - let it control the state */
17946352e4d2SDavid Hildenbrand 	vcpu->kvm->arch.user_cpu_state_ctrl = 1;
17956352e4d2SDavid Hildenbrand 
17966352e4d2SDavid Hildenbrand 	switch (mp_state->mp_state) {
17976352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_STOPPED:
17986352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_stop(vcpu);
17996352e4d2SDavid Hildenbrand 		break;
18006352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_OPERATING:
18016352e4d2SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
18026352e4d2SDavid Hildenbrand 		break;
18036352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_LOAD:
18046352e4d2SDavid Hildenbrand 	case KVM_MP_STATE_CHECK_STOP:
18056352e4d2SDavid Hildenbrand 		/* fall through - CHECK_STOP and LOAD are not supported yet */
18066352e4d2SDavid Hildenbrand 	default:
18076352e4d2SDavid Hildenbrand 		rc = -ENXIO;
18086352e4d2SDavid Hildenbrand 	}
18096352e4d2SDavid Hildenbrand 
18106352e4d2SDavid Hildenbrand 	return rc;
181162d9f0dbSMarcelo Tosatti }
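
/*
 * Illustrative userspace counterpart (vcpu_fd assumed): stopping a vcpu,
 * which also switches the VM to user-controlled cpu state as done above.
 */
#if 0
struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_STOPPED };
ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
#endif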
181262d9f0dbSMarcelo Tosatti 
18138ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu)
18148ad35755SDavid Hildenbrand {
18158ad35755SDavid Hildenbrand 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
18168ad35755SDavid Hildenbrand }
18178ad35755SDavid Hildenbrand 
18182c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
18192c70fe44SChristian Borntraeger {
1820785dbef4SChristian Borntraeger 	if (!vcpu->requests)
1821785dbef4SChristian Borntraeger 		return 0;
18228ad35755SDavid Hildenbrand retry:
18238e236546SChristian Borntraeger 	kvm_s390_vcpu_request_handled(vcpu);
18242c70fe44SChristian Borntraeger 	/*
18252c70fe44SChristian Borntraeger 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
18262c70fe44SChristian Borntraeger 	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
18272c70fe44SChristian Borntraeger 	 * This ensures that the ipte instruction for this request has
18282c70fe44SChristian Borntraeger 	 * already finished. We might race against a second unmapper that
18292c70fe44SChristian Borntraeger 	 * wants to set the blocking bit. Let's just retry the request loop.
18302c70fe44SChristian Borntraeger 	 */
18318ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
18322c70fe44SChristian Borntraeger 		int rc;
18332c70fe44SChristian Borntraeger 		rc = gmap_ipte_notify(vcpu->arch.gmap,
1834fda902cbSMichael Mueller 				      kvm_s390_get_prefix(vcpu),
18352c70fe44SChristian Borntraeger 				      PAGE_SIZE * 2);
18362c70fe44SChristian Borntraeger 		if (rc)
18372c70fe44SChristian Borntraeger 			return rc;
18388ad35755SDavid Hildenbrand 		goto retry;
18392c70fe44SChristian Borntraeger 	}
18408ad35755SDavid Hildenbrand 
1841d3d692c8SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
1842d3d692c8SDavid Hildenbrand 		vcpu->arch.sie_block->ihcpu = 0xffff;
1843d3d692c8SDavid Hildenbrand 		goto retry;
1844d3d692c8SDavid Hildenbrand 	}
1845d3d692c8SDavid Hildenbrand 
18468ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
18478ad35755SDavid Hildenbrand 		if (!ibs_enabled(vcpu)) {
18488ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
18498ad35755SDavid Hildenbrand 			atomic_set_mask(CPUSTAT_IBS,
18508ad35755SDavid Hildenbrand 					&vcpu->arch.sie_block->cpuflags);
18518ad35755SDavid Hildenbrand 		}
18528ad35755SDavid Hildenbrand 		goto retry;
18538ad35755SDavid Hildenbrand 	}
18548ad35755SDavid Hildenbrand 
18558ad35755SDavid Hildenbrand 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
18568ad35755SDavid Hildenbrand 		if (ibs_enabled(vcpu)) {
18578ad35755SDavid Hildenbrand 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
18588ad35755SDavid Hildenbrand 			atomic_clear_mask(CPUSTAT_IBS,
18598ad35755SDavid Hildenbrand 					  &vcpu->arch.sie_block->cpuflags);
18608ad35755SDavid Hildenbrand 		}
18618ad35755SDavid Hildenbrand 		goto retry;
18628ad35755SDavid Hildenbrand 	}
18638ad35755SDavid Hildenbrand 
18640759d068SDavid Hildenbrand 	/* nothing to do, just clear the request */
18650759d068SDavid Hildenbrand 	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
18660759d068SDavid Hildenbrand 
18672c70fe44SChristian Borntraeger 	return 0;
18682c70fe44SChristian Borntraeger }
18692c70fe44SChristian Borntraeger 
1870fa576c58SThomas Huth /**
1871fa576c58SThomas Huth  * kvm_arch_fault_in_page - fault-in guest page if necessary
1872fa576c58SThomas Huth  * @vcpu: The corresponding virtual cpu
1873fa576c58SThomas Huth  * @gpa: Guest physical address
1874fa576c58SThomas Huth  * @writable: Whether the page should be writable or not
1875fa576c58SThomas Huth  *
1876fa576c58SThomas Huth  * Make sure that a guest page has been faulted-in on the host.
1877fa576c58SThomas Huth  *
1878fa576c58SThomas Huth  * Return: Zero on success, negative error code otherwise.
1879fa576c58SThomas Huth  */
1880fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
188124eb3a82SDominik Dingel {
1882527e30b4SMartin Schwidefsky 	return gmap_fault(vcpu->arch.gmap, gpa,
1883527e30b4SMartin Schwidefsky 			  writable ? FAULT_FLAG_WRITE : 0);
188424eb3a82SDominik Dingel }
188524eb3a82SDominik Dingel 
18863c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
18873c038e6bSDominik Dingel 				      unsigned long token)
18883c038e6bSDominik Dingel {
18893c038e6bSDominik Dingel 	struct kvm_s390_interrupt inti;
1890383d0b05SJens Freimann 	struct kvm_s390_irq irq;
18913c038e6bSDominik Dingel 
18923c038e6bSDominik Dingel 	if (start_token) {
1893383d0b05SJens Freimann 		irq.u.ext.ext_params2 = token;
1894383d0b05SJens Freimann 		irq.type = KVM_S390_INT_PFAULT_INIT;
1895383d0b05SJens Freimann 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
18963c038e6bSDominik Dingel 	} else {
18973c038e6bSDominik Dingel 		inti.type = KVM_S390_INT_PFAULT_DONE;
1898383d0b05SJens Freimann 		inti.parm64 = token;
18993c038e6bSDominik Dingel 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
19003c038e6bSDominik Dingel 	}
19013c038e6bSDominik Dingel }
19023c038e6bSDominik Dingel 
19033c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
19043c038e6bSDominik Dingel 				     struct kvm_async_pf *work)
19053c038e6bSDominik Dingel {
19063c038e6bSDominik Dingel 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
19073c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
19083c038e6bSDominik Dingel }
19093c038e6bSDominik Dingel 
19103c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
19113c038e6bSDominik Dingel 				 struct kvm_async_pf *work)
19123c038e6bSDominik Dingel {
19133c038e6bSDominik Dingel 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
19143c038e6bSDominik Dingel 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
19153c038e6bSDominik Dingel }
19163c038e6bSDominik Dingel 
19173c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
19183c038e6bSDominik Dingel 			       struct kvm_async_pf *work)
19193c038e6bSDominik Dingel {
19203c038e6bSDominik Dingel 	/* s390 will always inject the page directly */
19213c038e6bSDominik Dingel }
19223c038e6bSDominik Dingel 
19233c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
19243c038e6bSDominik Dingel {
19253c038e6bSDominik Dingel 	/*
19263c038e6bSDominik Dingel 	 * s390 will always inject the page directly,
19273c038e6bSDominik Dingel 	 * but we still want check_async_completion to clean up
19283c038e6bSDominik Dingel 	 */
19293c038e6bSDominik Dingel 	return true;
19303c038e6bSDominik Dingel }
19313c038e6bSDominik Dingel 
19323c038e6bSDominik Dingel static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
19333c038e6bSDominik Dingel {
19343c038e6bSDominik Dingel 	hva_t hva;
19353c038e6bSDominik Dingel 	struct kvm_arch_async_pf arch;
19363c038e6bSDominik Dingel 	int rc;
19373c038e6bSDominik Dingel 
19383c038e6bSDominik Dingel 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
19393c038e6bSDominik Dingel 		return 0;
19403c038e6bSDominik Dingel 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
19413c038e6bSDominik Dingel 	    vcpu->arch.pfault_compare)
19423c038e6bSDominik Dingel 		return 0;
19433c038e6bSDominik Dingel 	if (psw_extint_disabled(vcpu))
19443c038e6bSDominik Dingel 		return 0;
19459a022067SDavid Hildenbrand 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
19463c038e6bSDominik Dingel 		return 0;
19473c038e6bSDominik Dingel 	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
19483c038e6bSDominik Dingel 		return 0;
19493c038e6bSDominik Dingel 	if (!vcpu->arch.gmap->pfault_enabled)
19503c038e6bSDominik Dingel 		return 0;
19513c038e6bSDominik Dingel 
195281480cc1SHeiko Carstens 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
195381480cc1SHeiko Carstens 	hva += current->thread.gmap_addr & ~PAGE_MASK;
195481480cc1SHeiko Carstens 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
19553c038e6bSDominik Dingel 		return 0;
19563c038e6bSDominik Dingel 
19573c038e6bSDominik Dingel 	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
19583c038e6bSDominik Dingel 	return rc;
19593c038e6bSDominik Dingel }
19603c038e6bSDominik Dingel 
19613fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu)
1962b0c632dbSHeiko Carstens {
19633fb4c40fSThomas Huth 	int rc, cpuflags;
1964e168bf8dSCarsten Otte 
19653c038e6bSDominik Dingel 	/*
19663c038e6bSDominik Dingel 	 * On s390 notifications for arriving pages will be delivered directly
19673c038e6bSDominik Dingel 	 * to the guest but the housekeeping for completed pfaults is
19683c038e6bSDominik Dingel 	 * handled outside the worker.
19693c038e6bSDominik Dingel 	 */
19703c038e6bSDominik Dingel 	kvm_check_async_pf_completion(vcpu);
19713c038e6bSDominik Dingel 
19725a32c1afSChristian Borntraeger 	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
1973b0c632dbSHeiko Carstens 
1974b0c632dbSHeiko Carstens 	if (need_resched())
1975b0c632dbSHeiko Carstens 		schedule();
1976b0c632dbSHeiko Carstens 
1977d3a73acbSMartin Schwidefsky 	if (test_cpu_flag(CIF_MCCK_PENDING))
197871cde587SChristian Borntraeger 		s390_handle_mcck();
197971cde587SChristian Borntraeger 
198079395031SJens Freimann 	if (!kvm_is_ucontrol(vcpu->kvm)) {
198179395031SJens Freimann 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
198279395031SJens Freimann 		if (rc)
198379395031SJens Freimann 			return rc;
198479395031SJens Freimann 	}
19850ff31867SCarsten Otte 
19862c70fe44SChristian Borntraeger 	rc = kvm_s390_handle_requests(vcpu);
19872c70fe44SChristian Borntraeger 	if (rc)
19882c70fe44SChristian Borntraeger 		return rc;
19892c70fe44SChristian Borntraeger 
199027291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu)) {
199127291e21SDavid Hildenbrand 		kvm_s390_backup_guest_per_regs(vcpu);
199227291e21SDavid Hildenbrand 		kvm_s390_patch_guest_per_regs(vcpu);
199327291e21SDavid Hildenbrand 	}
199427291e21SDavid Hildenbrand 
1995b0c632dbSHeiko Carstens 	vcpu->arch.sie_block->icptcode = 0;
19963fb4c40fSThomas Huth 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
19973fb4c40fSThomas Huth 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
19983fb4c40fSThomas Huth 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
19992b29a9fdSDominik Dingel 
20003fb4c40fSThomas Huth 	return 0;
20013fb4c40fSThomas Huth }
20023fb4c40fSThomas Huth 
2003492d8642SThomas Huth static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
2004492d8642SThomas Huth {
2005492d8642SThomas Huth 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
2006492d8642SThomas Huth 	u8 opcode;
2007492d8642SThomas Huth 	int rc;
2008492d8642SThomas Huth 
2009492d8642SThomas Huth 	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
2010492d8642SThomas Huth 	trace_kvm_s390_sie_fault(vcpu);
2011492d8642SThomas Huth 
2012492d8642SThomas Huth 	/*
2013492d8642SThomas Huth 	 * We want to inject an addressing exception, which is defined as a
2014492d8642SThomas Huth 	 * suppressing or terminating exception. However, since we came here
2015492d8642SThomas Huth 	 * by a DAT access exception, the PSW still points to the faulting
2016492d8642SThomas Huth 	 * instruction since DAT exceptions are nullifying. So we've got
2017492d8642SThomas Huth 	 * to look up the current opcode to get the length of the instruction
2018492d8642SThomas Huth 	 * to be able to forward the PSW.
2019492d8642SThomas Huth 	 */
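	/*
	 * Illustrative example: an SS-format instruction such as MVC has the
	 * two topmost opcode bits set, so insn_length() yields 6 and the
	 * __rewind_psw() call below advances psw->addr by 6 bytes, i.e. past
	 * the faulting instruction, as a suppressing exception requires.
	 */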
20208ae04b8fSAlexander Yarygin 	rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
2021492d8642SThomas Huth 	if (rc)
2022492d8642SThomas Huth 		return kvm_s390_inject_prog_cond(vcpu, rc);
2023492d8642SThomas Huth 	psw->addr = __rewind_psw(*psw, -insn_length(opcode));
2024492d8642SThomas Huth 
2025492d8642SThomas Huth 	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
2026492d8642SThomas Huth }
2027492d8642SThomas Huth 
20283fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
20293fb4c40fSThomas Huth {
203024eb3a82SDominik Dingel 	int rc = -1;
20312b29a9fdSDominik Dingel 
20322b29a9fdSDominik Dingel 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
20332b29a9fdSDominik Dingel 		   vcpu->arch.sie_block->icptcode);
20342b29a9fdSDominik Dingel 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
20352b29a9fdSDominik Dingel 
203627291e21SDavid Hildenbrand 	if (guestdbg_enabled(vcpu))
203727291e21SDavid Hildenbrand 		kvm_s390_restore_guest_per_regs(vcpu);
203827291e21SDavid Hildenbrand 
20393fb4c40fSThomas Huth 	if (exit_reason >= 0) {
20407c470539SMartin Schwidefsky 		rc = 0;
2041210b1607SThomas Huth 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
2042210b1607SThomas Huth 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2043210b1607SThomas Huth 		vcpu->run->s390_ucontrol.trans_exc_code =
2044210b1607SThomas Huth 						current->thread.gmap_addr;
2045210b1607SThomas Huth 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
2046210b1607SThomas Huth 		rc = -EREMOTE;
204724eb3a82SDominik Dingel 
204824eb3a82SDominik Dingel 	} else if (current->thread.gmap_pfault) {
20493c038e6bSDominik Dingel 		trace_kvm_s390_major_guest_pfault(vcpu);
205024eb3a82SDominik Dingel 		current->thread.gmap_pfault = 0;
2051fa576c58SThomas Huth 		if (kvm_arch_setup_async_pf(vcpu)) {
205224eb3a82SDominik Dingel 			rc = 0;
2053fa576c58SThomas Huth 		} else {
2054fa576c58SThomas Huth 			gpa_t gpa = current->thread.gmap_addr;
2055fa576c58SThomas Huth 			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
2056fa576c58SThomas Huth 		}
205724eb3a82SDominik Dingel 	}
205824eb3a82SDominik Dingel 
2059492d8642SThomas Huth 	if (rc == -1)
2060492d8642SThomas Huth 		rc = vcpu_post_run_fault_in_sie(vcpu);
2061b0c632dbSHeiko Carstens 
20625a32c1afSChristian Borntraeger 	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
20633fb4c40fSThomas Huth 
2064a76ccff6SThomas Huth 	if (rc == 0) {
2065a76ccff6SThomas Huth 		if (kvm_is_ucontrol(vcpu->kvm))
20662955c83fSChristian Borntraeger 			/* Don't exit for host interrupts. */
20672955c83fSChristian Borntraeger 			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
2068a76ccff6SThomas Huth 		else
2069a76ccff6SThomas Huth 			rc = kvm_handle_sie_intercept(vcpu);
2070a76ccff6SThomas Huth 	}
2071a76ccff6SThomas Huth 
20723fb4c40fSThomas Huth 	return rc;
20733fb4c40fSThomas Huth }
20743fb4c40fSThomas Huth 
20753fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu)
20763fb4c40fSThomas Huth {
20773fb4c40fSThomas Huth 	int rc, exit_reason;
20783fb4c40fSThomas Huth 
2079800c1065SThomas Huth 	/*
2080800c1065SThomas Huth 	 * We try to hold kvm->srcu during most of vcpu_run (except when
2081800c1065SThomas Huth 	 * running the guest), so that memslots (and other stuff) are protected
2082800c1065SThomas Huth 	 */
2083800c1065SThomas Huth 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2084800c1065SThomas Huth 
2085a76ccff6SThomas Huth 	do {
20863fb4c40fSThomas Huth 		rc = vcpu_pre_run(vcpu);
20873fb4c40fSThomas Huth 		if (rc)
2088a76ccff6SThomas Huth 			break;
20893fb4c40fSThomas Huth 
2090800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
20913fb4c40fSThomas Huth 		/*
2092a76ccff6SThomas Huth 		 * As PF_VCPU will be used in the fault handler, there must
2093a76ccff6SThomas Huth 		 * be no uaccess between guest_enter and guest_exit.
20943fb4c40fSThomas Huth 		 */
20950097d12eSChristian Borntraeger 		local_irq_disable();
20960097d12eSChristian Borntraeger 		__kvm_guest_enter();
20970097d12eSChristian Borntraeger 		local_irq_enable();
2098a76ccff6SThomas Huth 		exit_reason = sie64a(vcpu->arch.sie_block,
2099a76ccff6SThomas Huth 				     vcpu->run->s.regs.gprs);
21000097d12eSChristian Borntraeger 		local_irq_disable();
21010097d12eSChristian Borntraeger 		__kvm_guest_exit();
21020097d12eSChristian Borntraeger 		local_irq_enable();
2103800c1065SThomas Huth 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
21043fb4c40fSThomas Huth 
21053fb4c40fSThomas Huth 		rc = vcpu_post_run(vcpu, exit_reason);
210627291e21SDavid Hildenbrand 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
21073fb4c40fSThomas Huth 
2108800c1065SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
2109e168bf8dSCarsten Otte 	return rc;
2110b0c632dbSHeiko Carstens }
2111b0c632dbSHeiko Carstens 
2112b028ee3eSDavid Hildenbrand static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2113b028ee3eSDavid Hildenbrand {
2114b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2115b028ee3eSDavid Hildenbrand 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2116b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2117b028ee3eSDavid Hildenbrand 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2118b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2119b028ee3eSDavid Hildenbrand 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
2120d3d692c8SDavid Hildenbrand 		/* some control register changes require a tlb flush */
2121d3d692c8SDavid Hildenbrand 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2122b028ee3eSDavid Hildenbrand 	}
2123b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
2124b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
2125b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2126b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2127b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2128b028ee3eSDavid Hildenbrand 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2129b028ee3eSDavid Hildenbrand 	}
2130b028ee3eSDavid Hildenbrand 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2131b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2132b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2133b028ee3eSDavid Hildenbrand 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
21349fbd8082SDavid Hildenbrand 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
21359fbd8082SDavid Hildenbrand 			kvm_clear_async_pf_completion_queue(vcpu);
2136b028ee3eSDavid Hildenbrand 	}
2137b028ee3eSDavid Hildenbrand 	kvm_run->kvm_dirty_regs = 0;
2138b028ee3eSDavid Hildenbrand }
2139b028ee3eSDavid Hildenbrand 
2140b028ee3eSDavid Hildenbrand static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2141b028ee3eSDavid Hildenbrand {
2142b028ee3eSDavid Hildenbrand 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2143b028ee3eSDavid Hildenbrand 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2144b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2145b028ee3eSDavid Hildenbrand 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
2146b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
2147b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2148b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2149b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2150b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2151b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2152b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2153b028ee3eSDavid Hildenbrand 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2154b028ee3eSDavid Hildenbrand }
2155b028ee3eSDavid Hildenbrand 
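/*
 * A hedged userspace-side sketch of the register-sync protocol implemented
 * by sync_regs()/store_regs() above: registers flagged in kvm_dirty_regs are
 * picked up before entering SIE, and the current state is written back to
 * the shared kvm_run page on return from KVM_RUN.  vcpu_fd and mmap_size
 * (from KVM_GET_VCPU_MMAP_SIZE) are assumed to exist; the PSW values are
 * purely illustrative.
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	run->psw_addr = 0x10000;
 *	run->psw_mask = 0x0000000180000000UL;	/- 64-bit addressing mode
 *	run->s.regs.prefix = 0;
 *	run->kvm_dirty_regs = KVM_SYNC_PREFIX;	/- consumed by sync_regs()
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *	/- store_regs() has refreshed run->psw_* and run->s.regs at this point
 */
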
2156b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2157b0c632dbSHeiko Carstens {
21588f2abe6aSChristian Borntraeger 	int rc;
2159b0c632dbSHeiko Carstens 	sigset_t sigsaved;
2160b0c632dbSHeiko Carstens 
216127291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu)) {
216227291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
216327291e21SDavid Hildenbrand 		return 0;
216427291e21SDavid Hildenbrand 	}
216527291e21SDavid Hildenbrand 
2166b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
2167b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2168b0c632dbSHeiko Carstens 
21696352e4d2SDavid Hildenbrand 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
21706852d7b6SDavid Hildenbrand 		kvm_s390_vcpu_start(vcpu);
21716352e4d2SDavid Hildenbrand 	} else if (is_vcpu_stopped(vcpu)) {
2172ea2cdd27SDavid Hildenbrand 		pr_err_ratelimited("can't run stopped vcpu %d\n",
21736352e4d2SDavid Hildenbrand 				   vcpu->vcpu_id);
21746352e4d2SDavid Hildenbrand 		return -EINVAL;
21756352e4d2SDavid Hildenbrand 	}
2176b0c632dbSHeiko Carstens 
2177b028ee3eSDavid Hildenbrand 	sync_regs(vcpu, kvm_run);
2178d7b0b5ebSCarsten Otte 
2179dab4079dSHeiko Carstens 	might_fault();
2180e168bf8dSCarsten Otte 	rc = __vcpu_run(vcpu);
21819ace903dSChristian Ehrhardt 
2182b1d16c49SChristian Ehrhardt 	if (signal_pending(current) && !rc) {
2183b1d16c49SChristian Ehrhardt 		kvm_run->exit_reason = KVM_EXIT_INTR;
21848f2abe6aSChristian Borntraeger 		rc = -EINTR;
2185b1d16c49SChristian Ehrhardt 	}
21868f2abe6aSChristian Borntraeger 
218727291e21SDavid Hildenbrand 	if (guestdbg_exit_pending(vcpu) && !rc)  {
218827291e21SDavid Hildenbrand 		kvm_s390_prepare_debug_exit(vcpu);
218927291e21SDavid Hildenbrand 		rc = 0;
219027291e21SDavid Hildenbrand 	}
219127291e21SDavid Hildenbrand 
2192b8e660b8SHeiko Carstens 	if (rc == -EOPNOTSUPP) {
21938f2abe6aSChristian Borntraeger 		/* intercept cannot be handled in-kernel, prepare kvm-run */
21948f2abe6aSChristian Borntraeger 		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
21958f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
21968f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
21978f2abe6aSChristian Borntraeger 		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
21988f2abe6aSChristian Borntraeger 		rc = 0;
21998f2abe6aSChristian Borntraeger 	}
22008f2abe6aSChristian Borntraeger 
22018f2abe6aSChristian Borntraeger 	if (rc == -EREMOTE) {
22028f2abe6aSChristian Borntraeger 		/* intercept was handled, but userspace support is needed;
22038f2abe6aSChristian Borntraeger 		 * kvm_run has been prepared by the handler */
22048f2abe6aSChristian Borntraeger 		rc = 0;
22058f2abe6aSChristian Borntraeger 	}
22068f2abe6aSChristian Borntraeger 
2207b028ee3eSDavid Hildenbrand 	store_regs(vcpu, kvm_run);
2208d7b0b5ebSCarsten Otte 
2209b0c632dbSHeiko Carstens 	if (vcpu->sigset_active)
2210b0c632dbSHeiko Carstens 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2211b0c632dbSHeiko Carstens 
2212b0c632dbSHeiko Carstens 	vcpu->stat.exit_userspace++;
22137e8e6ab4SHeiko Carstens 	return rc;
2214b0c632dbSHeiko Carstens }
2215b0c632dbSHeiko Carstens 
2216b0c632dbSHeiko Carstens /*
2217b0c632dbSHeiko Carstens  * store status at address
2218b0c632dbSHeiko Carstens  * we have two special cases:
2219b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2220b0c632dbSHeiko Carstens  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2221b0c632dbSHeiko Carstens  */
2222d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
2223b0c632dbSHeiko Carstens {
2224092670cdSCarsten Otte 	unsigned char archmode = 1;
2225fda902cbSMichael Mueller 	unsigned int px;
2226178bd789SThomas Huth 	u64 clkcomp;
2227d0bce605SHeiko Carstens 	int rc;
2228b0c632dbSHeiko Carstens 
2229d0bce605SHeiko Carstens 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2230d0bce605SHeiko Carstens 		if (write_guest_abs(vcpu, 163, &archmode, 1))
2231b0c632dbSHeiko Carstens 			return -EFAULT;
2232d0bce605SHeiko Carstens 		gpa = SAVE_AREA_BASE;
2233d0bce605SHeiko Carstens 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2234d0bce605SHeiko Carstens 		if (write_guest_real(vcpu, 163, &archmode, 1))
2235b0c632dbSHeiko Carstens 			return -EFAULT;
2236d0bce605SHeiko Carstens 		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
2237d0bce605SHeiko Carstens 	}
2238d0bce605SHeiko Carstens 	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
2239d0bce605SHeiko Carstens 			     vcpu->arch.guest_fpregs.fprs, 128);
2240d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
2241d0bce605SHeiko Carstens 			      vcpu->run->s.regs.gprs, 128);
2242d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
2243d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gpsw, 16);
2244fda902cbSMichael Mueller 	px = kvm_s390_get_prefix(vcpu);
2245d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
2246fda902cbSMichael Mueller 			      &px, 4);
2247d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu,
2248d0bce605SHeiko Carstens 			      gpa + offsetof(struct save_area, fp_ctrl_reg),
2249d0bce605SHeiko Carstens 			      &vcpu->arch.guest_fpregs.fpc, 4);
2250d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
2251d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->todpr, 4);
2252d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
2253d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->cputm, 8);
2254178bd789SThomas Huth 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
2255d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
2256d0bce605SHeiko Carstens 			      &clkcomp, 8);
2257d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
2258d0bce605SHeiko Carstens 			      &vcpu->run->s.regs.acrs, 64);
2259d0bce605SHeiko Carstens 	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
2260d0bce605SHeiko Carstens 			      &vcpu->arch.sie_block->gcr, 128);
2261d0bce605SHeiko Carstens 	return rc ? -EFAULT : 0;
2262b0c632dbSHeiko Carstens }
2263b0c632dbSHeiko Carstens 
2264e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2265e879892cSThomas Huth {
2266e879892cSThomas Huth 	/*
2267e879892cSThomas Huth 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2268e879892cSThomas Huth 	 * copying in vcpu load/put. Let's update our copies before we save
2269e879892cSThomas Huth 	 * them into the save area.
2270e879892cSThomas Huth 	 */
2271e879892cSThomas Huth 	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
2272e879892cSThomas Huth 	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
2273e879892cSThomas Huth 	save_access_regs(vcpu->run->s.regs.acrs);
2274e879892cSThomas Huth 
2275e879892cSThomas Huth 	return kvm_s390_store_status_unloaded(vcpu, addr);
2276e879892cSThomas Huth }
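
/*
 * Userspace reaches the two store-status helpers above through the
 * KVM_S390_STORE_STATUS vcpu ioctl, whose argument is the guest absolute
 * address to store to, or one of the two special values checked in
 * kvm_s390_store_status_unloaded().  Illustrative sketch (vcpu_fd assumed):
 *
 *	/- store into the architected save area (0x1200 on 64 bit)
 *	ioctl(vcpu_fd, KVM_S390_STORE_STATUS, KVM_S390_STORE_STATUS_NOADDR);
 *	/- store relative to the vcpu's current prefix area
 *	ioctl(vcpu_fd, KVM_S390_STORE_STATUS, KVM_S390_STORE_STATUS_PREFIXED);
 */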
2277e879892cSThomas Huth 
2278bc17de7cSEric Farman /*
2279bc17de7cSEric Farman  * store additional status at address
2280bc17de7cSEric Farman  */
2281bc17de7cSEric Farman int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2282bc17de7cSEric Farman 					unsigned long gpa)
2283bc17de7cSEric Farman {
2284bc17de7cSEric Farman 	/* Only bits 0-53 are used for address formation */
2285bc17de7cSEric Farman 	if (!(gpa & ~0x3ff))
2286bc17de7cSEric Farman 		return 0;
2287bc17de7cSEric Farman 
2288bc17de7cSEric Farman 	return write_guest_abs(vcpu, gpa & ~0x3ff,
2289bc17de7cSEric Farman 			       (void *)&vcpu->run->s.regs.vrs, 512);
2290bc17de7cSEric Farman }
2291bc17de7cSEric Farman 
2292bc17de7cSEric Farman int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2293bc17de7cSEric Farman {
2294bc17de7cSEric Farman 	if (!test_kvm_facility(vcpu->kvm, 129))
2295bc17de7cSEric Farman 		return 0;
2296bc17de7cSEric Farman 
2297bc17de7cSEric Farman 	/*
2298bc17de7cSEric Farman 	 * The guest VXRS are in the host VXRS due to the lazy
2299bc17de7cSEric Farman 	 * copying in vcpu load/put. Let's update our copies before we save
2300bc17de7cSEric Farman 	 * them into the save area.
2301bc17de7cSEric Farman 	 */
2302bc17de7cSEric Farman 	save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
2303bc17de7cSEric Farman 
2304bc17de7cSEric Farman 	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2305bc17de7cSEric Farman }
2306bc17de7cSEric Farman 
23078ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
23088ad35755SDavid Hildenbrand {
23098ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
23108e236546SChristian Borntraeger 	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
23118ad35755SDavid Hildenbrand }
23128ad35755SDavid Hildenbrand 
23138ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
23148ad35755SDavid Hildenbrand {
23158ad35755SDavid Hildenbrand 	unsigned int i;
23168ad35755SDavid Hildenbrand 	struct kvm_vcpu *vcpu;
23178ad35755SDavid Hildenbrand 
23188ad35755SDavid Hildenbrand 	kvm_for_each_vcpu(i, vcpu, kvm) {
23198ad35755SDavid Hildenbrand 		__disable_ibs_on_vcpu(vcpu);
23208ad35755SDavid Hildenbrand 	}
23218ad35755SDavid Hildenbrand }
23228ad35755SDavid Hildenbrand 
23238ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
23248ad35755SDavid Hildenbrand {
23258ad35755SDavid Hildenbrand 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
23268e236546SChristian Borntraeger 	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
23278ad35755SDavid Hildenbrand }
23288ad35755SDavid Hildenbrand 
23296852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
23306852d7b6SDavid Hildenbrand {
23318ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
23328ad35755SDavid Hildenbrand 
23338ad35755SDavid Hildenbrand 	if (!is_vcpu_stopped(vcpu))
23348ad35755SDavid Hildenbrand 		return;
23358ad35755SDavid Hildenbrand 
23366852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
23378ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
2338433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
23398ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
23408ad35755SDavid Hildenbrand 
23418ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
23428ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
23438ad35755SDavid Hildenbrand 			started_vcpus++;
23448ad35755SDavid Hildenbrand 	}
23458ad35755SDavid Hildenbrand 
23468ad35755SDavid Hildenbrand 	if (started_vcpus == 0) {
23478ad35755SDavid Hildenbrand 		/* we're the only active VCPU -> speed it up */
23488ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(vcpu);
23498ad35755SDavid Hildenbrand 	} else if (started_vcpus == 1) {
23508ad35755SDavid Hildenbrand 		/*
23518ad35755SDavid Hildenbrand 		 * As we are starting a second VCPU, we have to disable
23528ad35755SDavid Hildenbrand 		 * the IBS facility on all VCPUs to remove potentially
23538ad35755SDavid Hildenbrand 		 * outstanding ENABLE requests.
23548ad35755SDavid Hildenbrand 		 */
23558ad35755SDavid Hildenbrand 		__disable_ibs_on_all_vcpus(vcpu->kvm);
23568ad35755SDavid Hildenbrand 	}
23578ad35755SDavid Hildenbrand 
23586852d7b6SDavid Hildenbrand 	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
23598ad35755SDavid Hildenbrand 	/*
23608ad35755SDavid Hildenbrand 	 * Another VCPU might have used IBS while we were offline.
23618ad35755SDavid Hildenbrand 	 * Let's play safe and flush the VCPU at startup.
23628ad35755SDavid Hildenbrand 	 */
2363d3d692c8SDavid Hildenbrand 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2364433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
23658ad35755SDavid Hildenbrand 	return;
23666852d7b6SDavid Hildenbrand }
23676852d7b6SDavid Hildenbrand 
23686852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
23696852d7b6SDavid Hildenbrand {
23708ad35755SDavid Hildenbrand 	int i, online_vcpus, started_vcpus = 0;
23718ad35755SDavid Hildenbrand 	struct kvm_vcpu *started_vcpu = NULL;
23728ad35755SDavid Hildenbrand 
23738ad35755SDavid Hildenbrand 	if (is_vcpu_stopped(vcpu))
23748ad35755SDavid Hildenbrand 		return;
23758ad35755SDavid Hildenbrand 
23766852d7b6SDavid Hildenbrand 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
23778ad35755SDavid Hildenbrand 	/* Only one cpu at a time may enter/leave the STOPPED state. */
2378433b9ee4SDavid Hildenbrand 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
23798ad35755SDavid Hildenbrand 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
23808ad35755SDavid Hildenbrand 
238132f5ff63SDavid Hildenbrand 	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
23826cddd432SDavid Hildenbrand 	kvm_s390_clear_stop_irq(vcpu);
238332f5ff63SDavid Hildenbrand 
23846cddd432SDavid Hildenbrand 	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
23858ad35755SDavid Hildenbrand 	__disable_ibs_on_vcpu(vcpu);
23868ad35755SDavid Hildenbrand 
23878ad35755SDavid Hildenbrand 	for (i = 0; i < online_vcpus; i++) {
23888ad35755SDavid Hildenbrand 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
23898ad35755SDavid Hildenbrand 			started_vcpus++;
23908ad35755SDavid Hildenbrand 			started_vcpu = vcpu->kvm->vcpus[i];
23918ad35755SDavid Hildenbrand 		}
23928ad35755SDavid Hildenbrand 	}
23938ad35755SDavid Hildenbrand 
23948ad35755SDavid Hildenbrand 	if (started_vcpus == 1) {
23958ad35755SDavid Hildenbrand 		/*
23968ad35755SDavid Hildenbrand 		 * As we only have one VCPU left, we want to enable the
23978ad35755SDavid Hildenbrand 		 * IBS facility for that VCPU to speed it up.
23988ad35755SDavid Hildenbrand 		 */
23998ad35755SDavid Hildenbrand 		__enable_ibs_on_vcpu(started_vcpu);
24008ad35755SDavid Hildenbrand 	}
24018ad35755SDavid Hildenbrand 
2402433b9ee4SDavid Hildenbrand 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
24038ad35755SDavid Hildenbrand 	return;
24046852d7b6SDavid Hildenbrand }
24056852d7b6SDavid Hildenbrand 
2406d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2407d6712df9SCornelia Huck 				     struct kvm_enable_cap *cap)
2408d6712df9SCornelia Huck {
2409d6712df9SCornelia Huck 	int r;
2410d6712df9SCornelia Huck 
2411d6712df9SCornelia Huck 	if (cap->flags)
2412d6712df9SCornelia Huck 		return -EINVAL;
2413d6712df9SCornelia Huck 
2414d6712df9SCornelia Huck 	switch (cap->cap) {
2415fa6b7fe9SCornelia Huck 	case KVM_CAP_S390_CSS_SUPPORT:
2416fa6b7fe9SCornelia Huck 		if (!vcpu->kvm->arch.css_support) {
2417fa6b7fe9SCornelia Huck 			vcpu->kvm->arch.css_support = 1;
2418c92ea7b9SChristian Borntraeger 			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
2419fa6b7fe9SCornelia Huck 			trace_kvm_s390_enable_css(vcpu->kvm);
2420fa6b7fe9SCornelia Huck 		}
2421fa6b7fe9SCornelia Huck 		r = 0;
2422fa6b7fe9SCornelia Huck 		break;
2423d6712df9SCornelia Huck 	default:
2424d6712df9SCornelia Huck 		r = -EINVAL;
2425d6712df9SCornelia Huck 		break;
2426d6712df9SCornelia Huck 	}
2427d6712df9SCornelia Huck 	return r;
2428d6712df9SCornelia Huck }
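
/*
 * KVM_CAP_S390_CSS_SUPPORT flips a VM-wide flag (vcpu->kvm->arch.css_support)
 * but is requested through the per-vcpu KVM_ENABLE_CAP ioctl handled above.
 * A minimal sketch of the userspace side (vcpu_fd is assumed; all other
 * fields stay zero):
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_S390_CSS_SUPPORT,
 *	};
 *	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		perror("KVM_ENABLE_CAP");
 */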
2429d6712df9SCornelia Huck 
243041408c28SThomas Huth static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
243141408c28SThomas Huth 				  struct kvm_s390_mem_op *mop)
243241408c28SThomas Huth {
243341408c28SThomas Huth 	void __user *uaddr = (void __user *)mop->buf;
243441408c28SThomas Huth 	void *tmpbuf = NULL;
243541408c28SThomas Huth 	int r, srcu_idx;
243641408c28SThomas Huth 	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
243741408c28SThomas Huth 				    | KVM_S390_MEMOP_F_CHECK_ONLY;
243841408c28SThomas Huth 
243941408c28SThomas Huth 	if (mop->flags & ~supported_flags)
244041408c28SThomas Huth 		return -EINVAL;
244141408c28SThomas Huth 
244241408c28SThomas Huth 	if (mop->size > MEM_OP_MAX_SIZE)
244341408c28SThomas Huth 		return -E2BIG;
244441408c28SThomas Huth 
244541408c28SThomas Huth 	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
244641408c28SThomas Huth 		tmpbuf = vmalloc(mop->size);
244741408c28SThomas Huth 		if (!tmpbuf)
244841408c28SThomas Huth 			return -ENOMEM;
244941408c28SThomas Huth 	}
245041408c28SThomas Huth 
245141408c28SThomas Huth 	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
245241408c28SThomas Huth 
245341408c28SThomas Huth 	switch (mop->op) {
245441408c28SThomas Huth 	case KVM_S390_MEMOP_LOGICAL_READ:
245541408c28SThomas Huth 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
245641408c28SThomas Huth 			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
245741408c28SThomas Huth 			break;
245841408c28SThomas Huth 		}
245941408c28SThomas Huth 		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
246041408c28SThomas Huth 		if (r == 0) {
246141408c28SThomas Huth 			if (copy_to_user(uaddr, tmpbuf, mop->size))
246241408c28SThomas Huth 				r = -EFAULT;
246341408c28SThomas Huth 		}
246441408c28SThomas Huth 		break;
246541408c28SThomas Huth 	case KVM_S390_MEMOP_LOGICAL_WRITE:
246641408c28SThomas Huth 		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
246741408c28SThomas Huth 			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
246841408c28SThomas Huth 			break;
246941408c28SThomas Huth 		}
247041408c28SThomas Huth 		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
247141408c28SThomas Huth 			r = -EFAULT;
247241408c28SThomas Huth 			break;
247341408c28SThomas Huth 		}
247441408c28SThomas Huth 		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
247541408c28SThomas Huth 		break;
247641408c28SThomas Huth 	default:
247741408c28SThomas Huth 		r = -EINVAL;
247841408c28SThomas Huth 	}
247941408c28SThomas Huth 
248041408c28SThomas Huth 	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
248141408c28SThomas Huth 
248241408c28SThomas Huth 	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
248341408c28SThomas Huth 		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
248441408c28SThomas Huth 
248541408c28SThomas Huth 	vfree(tmpbuf);
248641408c28SThomas Huth 	return r;
248741408c28SThomas Huth }
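
/*
 * A sketch of driving kvm_s390_guest_mem_op() from userspace via the
 * KVM_S390_MEM_OP vcpu ioctl.  vcpu_fd and the guest address 0x1000 are
 * assumptions for illustration only; the struct layout is the one from
 * <linux/kvm.h>.
 *
 *	unsigned char buf[256];
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = 0x1000,			/- guest logical address
 *		.size  = sizeof(buf),
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (unsigned long)buf,
 *		.ar    = 0,				/- access register number
 *	};
 *	int r = ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
 *	/- r == 0: buf now holds the guest data; r > 0: program interruption
 *	/- code (injected into the guest only if KVM_S390_MEMOP_F_INJECT_EXCEPTION
 *	/- was set in .flags); r < 0: usual errno-style failure.
 */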
248841408c28SThomas Huth 
2489b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp,
2490b0c632dbSHeiko Carstens 			 unsigned int ioctl, unsigned long arg)
2491b0c632dbSHeiko Carstens {
2492b0c632dbSHeiko Carstens 	struct kvm_vcpu *vcpu = filp->private_data;
2493b0c632dbSHeiko Carstens 	void __user *argp = (void __user *)arg;
2494800c1065SThomas Huth 	int idx;
2495bc923cc9SAvi Kivity 	long r;
2496b0c632dbSHeiko Carstens 
249793736624SAvi Kivity 	switch (ioctl) {
249847b43c52SJens Freimann 	case KVM_S390_IRQ: {
249947b43c52SJens Freimann 		struct kvm_s390_irq s390irq;
250047b43c52SJens Freimann 
250147b43c52SJens Freimann 		r = -EFAULT;
250247b43c52SJens Freimann 		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
250347b43c52SJens Freimann 			break;
250447b43c52SJens Freimann 		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
250547b43c52SJens Freimann 		break;
250647b43c52SJens Freimann 	}
250793736624SAvi Kivity 	case KVM_S390_INTERRUPT: {
2508ba5c1e9bSCarsten Otte 		struct kvm_s390_interrupt s390int;
2509383d0b05SJens Freimann 		struct kvm_s390_irq s390irq;
2510ba5c1e9bSCarsten Otte 
251193736624SAvi Kivity 		r = -EFAULT;
2512ba5c1e9bSCarsten Otte 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
251393736624SAvi Kivity 			break;
2514383d0b05SJens Freimann 		if (s390int_to_s390irq(&s390int, &s390irq))
2515383d0b05SJens Freimann 			return -EINVAL;
2516383d0b05SJens Freimann 		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
251793736624SAvi Kivity 		break;
2518ba5c1e9bSCarsten Otte 	}
2519b0c632dbSHeiko Carstens 	case KVM_S390_STORE_STATUS:
2520800c1065SThomas Huth 		idx = srcu_read_lock(&vcpu->kvm->srcu);
2521bc923cc9SAvi Kivity 		r = kvm_s390_vcpu_store_status(vcpu, arg);
2522800c1065SThomas Huth 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
2523bc923cc9SAvi Kivity 		break;
2524b0c632dbSHeiko Carstens 	case KVM_S390_SET_INITIAL_PSW: {
2525b0c632dbSHeiko Carstens 		psw_t psw;
2526b0c632dbSHeiko Carstens 
2527bc923cc9SAvi Kivity 		r = -EFAULT;
2528b0c632dbSHeiko Carstens 		if (copy_from_user(&psw, argp, sizeof(psw)))
2529bc923cc9SAvi Kivity 			break;
2530bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2531bc923cc9SAvi Kivity 		break;
2532b0c632dbSHeiko Carstens 	}
2533b0c632dbSHeiko Carstens 	case KVM_S390_INITIAL_RESET:
2534bc923cc9SAvi Kivity 		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2535bc923cc9SAvi Kivity 		break;
253614eebd91SCarsten Otte 	case KVM_SET_ONE_REG:
253714eebd91SCarsten Otte 	case KVM_GET_ONE_REG: {
253814eebd91SCarsten Otte 		struct kvm_one_reg reg;
253914eebd91SCarsten Otte 		r = -EFAULT;
254014eebd91SCarsten Otte 		if (copy_from_user(&reg, argp, sizeof(reg)))
254114eebd91SCarsten Otte 			break;
254214eebd91SCarsten Otte 		if (ioctl == KVM_SET_ONE_REG)
254314eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
254414eebd91SCarsten Otte 		else
254514eebd91SCarsten Otte 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
254614eebd91SCarsten Otte 		break;
254714eebd91SCarsten Otte 	}
254827e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
254927e0393fSCarsten Otte 	case KVM_S390_UCAS_MAP: {
255027e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
255127e0393fSCarsten Otte 
255227e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
255327e0393fSCarsten Otte 			r = -EFAULT;
255427e0393fSCarsten Otte 			break;
255527e0393fSCarsten Otte 		}
255627e0393fSCarsten Otte 
255727e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
255827e0393fSCarsten Otte 			r = -EINVAL;
255927e0393fSCarsten Otte 			break;
256027e0393fSCarsten Otte 		}
256127e0393fSCarsten Otte 
256227e0393fSCarsten Otte 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
256327e0393fSCarsten Otte 				     ucasmap.vcpu_addr, ucasmap.length);
256427e0393fSCarsten Otte 		break;
256527e0393fSCarsten Otte 	}
256627e0393fSCarsten Otte 	case KVM_S390_UCAS_UNMAP: {
256727e0393fSCarsten Otte 		struct kvm_s390_ucas_mapping ucasmap;
256827e0393fSCarsten Otte 
256927e0393fSCarsten Otte 		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
257027e0393fSCarsten Otte 			r = -EFAULT;
257127e0393fSCarsten Otte 			break;
257227e0393fSCarsten Otte 		}
257327e0393fSCarsten Otte 
257427e0393fSCarsten Otte 		if (!kvm_is_ucontrol(vcpu->kvm)) {
257527e0393fSCarsten Otte 			r = -EINVAL;
257627e0393fSCarsten Otte 			break;
257727e0393fSCarsten Otte 		}
257827e0393fSCarsten Otte 
257927e0393fSCarsten Otte 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
258027e0393fSCarsten Otte 			ucasmap.length);
258127e0393fSCarsten Otte 		break;
258227e0393fSCarsten Otte 	}
258327e0393fSCarsten Otte #endif
2584ccc7910fSCarsten Otte 	case KVM_S390_VCPU_FAULT: {
2585527e30b4SMartin Schwidefsky 		r = gmap_fault(vcpu->arch.gmap, arg, 0);
2586ccc7910fSCarsten Otte 		break;
2587ccc7910fSCarsten Otte 	}
2588d6712df9SCornelia Huck 	case KVM_ENABLE_CAP:
2589d6712df9SCornelia Huck 	{
2590d6712df9SCornelia Huck 		struct kvm_enable_cap cap;
2591d6712df9SCornelia Huck 		r = -EFAULT;
2592d6712df9SCornelia Huck 		if (copy_from_user(&cap, argp, sizeof(cap)))
2593d6712df9SCornelia Huck 			break;
2594d6712df9SCornelia Huck 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2595d6712df9SCornelia Huck 		break;
2596d6712df9SCornelia Huck 	}
259741408c28SThomas Huth 	case KVM_S390_MEM_OP: {
259841408c28SThomas Huth 		struct kvm_s390_mem_op mem_op;
259941408c28SThomas Huth 
260041408c28SThomas Huth 		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
260141408c28SThomas Huth 			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
260241408c28SThomas Huth 		else
260341408c28SThomas Huth 			r = -EFAULT;
260441408c28SThomas Huth 		break;
260541408c28SThomas Huth 	}
2606816c7667SJens Freimann 	case KVM_S390_SET_IRQ_STATE: {
2607816c7667SJens Freimann 		struct kvm_s390_irq_state irq_state;
2608816c7667SJens Freimann 
2609816c7667SJens Freimann 		r = -EFAULT;
2610816c7667SJens Freimann 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2611816c7667SJens Freimann 			break;
2612816c7667SJens Freimann 		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
2613816c7667SJens Freimann 		    irq_state.len == 0 ||
2614816c7667SJens Freimann 		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
2615816c7667SJens Freimann 			r = -EINVAL;
2616816c7667SJens Freimann 			break;
2617816c7667SJens Freimann 		}
2618816c7667SJens Freimann 		r = kvm_s390_set_irq_state(vcpu,
2619816c7667SJens Freimann 					   (void __user *) irq_state.buf,
2620816c7667SJens Freimann 					   irq_state.len);
2621816c7667SJens Freimann 		break;
2622816c7667SJens Freimann 	}
2623816c7667SJens Freimann 	case KVM_S390_GET_IRQ_STATE: {
2624816c7667SJens Freimann 		struct kvm_s390_irq_state irq_state;
2625816c7667SJens Freimann 
2626816c7667SJens Freimann 		r = -EFAULT;
2627816c7667SJens Freimann 		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2628816c7667SJens Freimann 			break;
2629816c7667SJens Freimann 		if (irq_state.len == 0) {
2630816c7667SJens Freimann 			r = -EINVAL;
2631816c7667SJens Freimann 			break;
2632816c7667SJens Freimann 		}
2633816c7667SJens Freimann 		r = kvm_s390_get_irq_state(vcpu,
2634816c7667SJens Freimann 					   (__u8 __user *)  irq_state.buf,
2635816c7667SJens Freimann 					   irq_state.len);
2636816c7667SJens Freimann 		break;
2637816c7667SJens Freimann 	}
2638b0c632dbSHeiko Carstens 	default:
26393e6afcf1SCarsten Otte 		r = -ENOTTY;
2640b0c632dbSHeiko Carstens 	}
2641bc923cc9SAvi Kivity 	return r;
2642b0c632dbSHeiko Carstens }
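
/*
 * A sketch of the KVM_S390_{GET,SET}_IRQ_STATE cases above, e.g. for
 * migrating pending local interrupts.  Note that the SET path requires
 * irq_state.len to be a non-zero multiple of sizeof(struct kvm_s390_irq)
 * bounded by VCPU_IRQS_MAX_BUF, while GET merely needs a non-empty buffer.
 * vcpu_fd is assumed; error handling is omitted.
 *
 *	struct kvm_s390_irq irqs[64];
 *	struct kvm_s390_irq_state st = {
 *		.buf = (unsigned long)irqs,
 *		.len = sizeof(irqs),
 *	};
 *	ioctl(vcpu_fd, KVM_S390_GET_IRQ_STATE, &st);
 *	/- ... later, on the destination vcpu, replay the saved state with
 *	/- st.len trimmed to the amount of data actually retrieved:
 *	ioctl(vcpu_fd, KVM_S390_SET_IRQ_STATE, &st);
 */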
2643b0c632dbSHeiko Carstens 
26445b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
26455b1c1493SCarsten Otte {
26465b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL
26475b1c1493SCarsten Otte 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
26485b1c1493SCarsten Otte 		 && (kvm_is_ucontrol(vcpu->kvm))) {
26495b1c1493SCarsten Otte 		vmf->page = virt_to_page(vcpu->arch.sie_block);
26505b1c1493SCarsten Otte 		get_page(vmf->page);
26515b1c1493SCarsten Otte 		return 0;
26525b1c1493SCarsten Otte 	}
26535b1c1493SCarsten Otte #endif
26545b1c1493SCarsten Otte 	return VM_FAULT_SIGBUS;
26555b1c1493SCarsten Otte }
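
/*
 * For user-controlled (ucontrol) VMs, kvm_arch_vcpu_fault() above lets
 * userspace map a vcpu's SIE control block by mmap()ing the vcpu fd at
 * KVM_S390_SIE_PAGE_OFFSET pages.  Rough sketch (vcpu_fd assumed; whether
 * the mapping needs to be writable depends on the ucontrol userspace):
 *
 *	long psize = sysconf(_SC_PAGESIZE);
 *	void *sie = mmap(NULL, psize, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 vcpu_fd, KVM_S390_SIE_PAGE_OFFSET * psize);
 */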
26565b1c1493SCarsten Otte 
26575587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
26585587027cSAneesh Kumar K.V 			    unsigned long npages)
2659db3fe4ebSTakuya Yoshikawa {
2660db3fe4ebSTakuya Yoshikawa 	return 0;
2661db3fe4ebSTakuya Yoshikawa }
2662db3fe4ebSTakuya Yoshikawa 
2663b0c632dbSHeiko Carstens /* Section: memory related */
2664f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
2665f7784b8eSMarcelo Tosatti 				   struct kvm_memory_slot *memslot,
266609170a49SPaolo Bonzini 				   const struct kvm_userspace_memory_region *mem,
26677b6195a9STakuya Yoshikawa 				   enum kvm_mr_change change)
2668b0c632dbSHeiko Carstens {
2669dd2887e7SNick Wang 	/* A few sanity checks. Memory slots have to start and end at a
2670dd2887e7SNick Wang 	   segment boundary (1MB). The memory in userland may be fragmented
2671dd2887e7SNick Wang 	   into various different vmas. It is okay to mmap() and munmap()
2672dd2887e7SNick Wang 	   stuff in this slot after doing this call at any time */
2673b0c632dbSHeiko Carstens 
2674598841caSCarsten Otte 	if (mem->userspace_addr & 0xffffful)
2675b0c632dbSHeiko Carstens 		return -EINVAL;
2676b0c632dbSHeiko Carstens 
2677598841caSCarsten Otte 	if (mem->memory_size & 0xffffful)
2678b0c632dbSHeiko Carstens 		return -EINVAL;
2679b0c632dbSHeiko Carstens 
2680f7784b8eSMarcelo Tosatti 	return 0;
2681f7784b8eSMarcelo Tosatti }
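
/*
 * The checks above mean a userspace VMM must register guest memory with a
 * 1 MB-aligned host address and a 1 MB-multiple size.  Illustrative sketch
 * (vm_fd is assumed; the over-allocate-and-round-up trick is just one way
 * to obtain an aligned mapping):
 *
 *	size_t sz = 256 << 20;				/- multiple of 1 MB
 *	void *raw = mmap(NULL, sz + (1 << 20), PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long aligned = ((unsigned long)raw + (1 << 20) - 1) & ~0xfffffUL;
 *	struct kvm_userspace_memory_region reg = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size = sz,
 *		.userspace_addr = aligned,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &reg);
 */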
2682f7784b8eSMarcelo Tosatti 
2683f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
268409170a49SPaolo Bonzini 				const struct kvm_userspace_memory_region *mem,
26858482644aSTakuya Yoshikawa 				const struct kvm_memory_slot *old,
2686f36f3f28SPaolo Bonzini 				const struct kvm_memory_slot *new,
26878482644aSTakuya Yoshikawa 				enum kvm_mr_change change)
2688f7784b8eSMarcelo Tosatti {
2689f7850c92SCarsten Otte 	int rc;
2690f7784b8eSMarcelo Tosatti 
26912cef4debSChristian Borntraeger 	/* If the basics of the memslot do not change, we do not want
26922cef4debSChristian Borntraeger 	 * to update the gmap. Every update causes several unnecessary
26932cef4debSChristian Borntraeger 	 * segment translation exceptions. This is usually handled just
26942cef4debSChristian Borntraeger 	 * fine by the normal fault handler + gmap, but it will also
26952cef4debSChristian Borntraeger 	 * cause faults on the prefix page of running guest CPUs.
26962cef4debSChristian Borntraeger 	 */
26972cef4debSChristian Borntraeger 	if (old->userspace_addr == mem->userspace_addr &&
26982cef4debSChristian Borntraeger 	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
26992cef4debSChristian Borntraeger 	    old->npages * PAGE_SIZE == mem->memory_size)
27002cef4debSChristian Borntraeger 		return;
2701598841caSCarsten Otte 
2702598841caSCarsten Otte 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2703598841caSCarsten Otte 		mem->guest_phys_addr, mem->memory_size);
2704598841caSCarsten Otte 	if (rc)
2705ea2cdd27SDavid Hildenbrand 		pr_warn("failed to commit memory region\n");
2706598841caSCarsten Otte 	return;
2707b0c632dbSHeiko Carstens }
2708b0c632dbSHeiko Carstens 
2709b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
2710b0c632dbSHeiko Carstens {
27119d8d5786SMichael Mueller 	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
2712b0c632dbSHeiko Carstens }
2713b0c632dbSHeiko Carstens 
2714b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void)
2715b0c632dbSHeiko Carstens {
2716b0c632dbSHeiko Carstens 	kvm_exit();
2717b0c632dbSHeiko Carstens }
2718b0c632dbSHeiko Carstens 
2719b0c632dbSHeiko Carstens module_init(kvm_s390_init);
2720b0c632dbSHeiko Carstens module_exit(kvm_s390_exit);
2721566af940SCornelia Huck 
2722566af940SCornelia Huck /*
2723566af940SCornelia Huck  * Enable autoloading of the kvm module.
2724566af940SCornelia Huck  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2725566af940SCornelia Huck  * since x86 takes a different approach.
2726566af940SCornelia Huck  */
2727566af940SCornelia Huck #include <linux/miscdevice.h>
2728566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR);
2729566af940SCornelia Huck MODULE_ALIAS("devname:kvm");
2730