/*
 * in-kernel handling for SIE intercepts
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/pagemap.h>

#include <asm/kvm_host.h>

#include "kvm-s390.h"
#include "gaccess.h"
#include "trace.h"
#include "trace-s390.h"

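/*
 * Intercepted LCTLG: load 64-bit control registers reg1 through reg3
 * from guest memory at the RSY-format effective address.
 */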
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 useraddr;
	int reg, rc;

	vcpu->stat.instruction_lctlg++;

	useraddr = kvm_s390_get_base_disp_rsy(vcpu);

	if (useraddr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	reg = reg1;

	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3,
		   useraddr);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);

	do {
		rc = get_guest_u64(vcpu, useraddr,
				   &vcpu->arch.sie_block->gcr[reg]);
		if (rc == -EFAULT) {
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			break;
		}
		useraddr += 8;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	return 0;
}

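/*
 * Intercepted LCTL: load the lower 32 bits of control registers reg1
 * through reg3 from guest memory; the upper halves are preserved.
 */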
static int handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 useraddr;
	u32 val = 0;
	int reg, rc;

	vcpu->stat.instruction_lctl++;

	useraddr = kvm_s390_get_base_disp_rs(vcpu);

	if (useraddr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3,
		   useraddr);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);

	reg = reg1;
	do {
		rc = get_guest_u32(vcpu, useraddr, &val);
		if (rc == -EFAULT) {
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			break;
		}
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= val;
		useraddr += 4;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	return 0;
}

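/*
 * Handlers for instructions with primary opcode 0xeb, indexed by the
 * low byte of the ipb field.
 */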
static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x8a] = kvm_s390_handle_priv_eb,
};

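/* Dispatch an intercepted 0xeb instruction to its handler, if any */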
static int handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

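/* Handlers for intercepted instructions, indexed by the first opcode byte */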
static const intercept_handler_t instruction_handlers[256] = {
	[0x01] = kvm_s390_handle_01,
	[0x82] = kvm_s390_handle_lpsw,
	[0x83] = kvm_s390_handle_diag,
	[0xae] = kvm_s390_handle_sigp,
	[0xb2] = kvm_s390_handle_b2,
	[0xb7] = handle_lctl,
	[0xb9] = kvm_s390_handle_b9,
	[0xe5] = kvm_s390_handle_e5,
	[0xeb] = handle_eb,
};

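/* Intercepts that only need their exit counters updated */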
static int handle_noop(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->icptcode) {
	case 0x0:
		vcpu->stat.exit_null++;
		break;
	case 0x10:
		vcpu->stat.exit_external_request++;
		break;
	case 0x14:
		vcpu->stat.exit_external_interrupt++;
		break;
	default:
		break; /* nothing */
	}
	return 0;
}

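/*
 * Stop intercept: act on the RELOADVCPU, STOP and STORE status requests
 * recorded in local_int.action_bits, clearing each bit as it is handled.
 */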
static int handle_stop(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	vcpu->stat.exit_stop_request++;
	spin_lock_bh(&vcpu->arch.local_int.lock);

	trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits);

	if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) {
		vcpu->arch.local_int.action_bits &= ~ACTION_RELOADVCPU_ON_STOP;
		rc = SIE_INTERCEPT_RERUNVCPU;
		vcpu->run->exit_reason = KVM_EXIT_INTR;
	}

	if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
		atomic_set_mask(CPUSTAT_STOPPED,
				&vcpu->arch.sie_block->cpuflags);
		vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
		VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
		rc = -EOPNOTSUPP;
	}

	if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
		vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
		/* store status must be called unlocked. Since local_int.lock
		 * only protects local_int.* and not guest memory we can give
		 * up the lock here */
		spin_unlock_bh(&vcpu->arch.local_int.lock);
		rc = kvm_s390_vcpu_store_status(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		if (rc >= 0)
			rc = -EOPNOTSUPP;
	} else
		spin_unlock_bh(&vcpu->arch.local_int.lock);
	return rc;
}

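/*
 * Validity intercept: reason code 0x37 means the host mapping for the
 * guest prefix pages is missing, so fault both pages in writable and
 * let the guest continue; all other reason codes are unsupported.
 */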
static int handle_validity(struct kvm_vcpu *vcpu)
{
	unsigned long vmaddr;
	int viwhy = vcpu->arch.sie_block->ipb >> 16;
	int rc;

	vcpu->stat.exit_validity++;
	trace_kvm_s390_intercept_validity(vcpu, viwhy);
	if (viwhy == 0x37) {
		vmaddr = gmap_fault(vcpu->arch.sie_block->prefix,
				    vcpu->arch.gmap);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = -EOPNOTSUPP;
			goto out;
		}
		rc = fault_in_pages_writeable((char __user *) vmaddr,
			 PAGE_SIZE);
		if (rc) {
			/* user will receive sigsegv, exit to user */
			rc = -EOPNOTSUPP;
			goto out;
		}
		vmaddr = gmap_fault(vcpu->arch.sie_block->prefix + PAGE_SIZE,
				    vcpu->arch.gmap);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = -EOPNOTSUPP;
			goto out;
		}
		rc = fault_in_pages_writeable((char __user *) vmaddr,
			 PAGE_SIZE);
		if (rc) {
			/* user will receive sigsegv, exit to user */
			rc = -EOPNOTSUPP;
			goto out;
		}
	} else
		rc = -EOPNOTSUPP;

out:
	if (rc)
		VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
			   viwhy);
	return rc;
}

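/* Instruction intercept: dispatch on the first opcode byte of ipa */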
static int handle_instruction(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	vcpu->stat.exit_instruction++;
	trace_kvm_s390_intercept_instruction(vcpu,
					     vcpu->arch.sie_block->ipa,
					     vcpu->arch.sie_block->ipb);
	handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

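/* Program interruption intercept: re-inject the interruption into the guest */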
static int handle_prog(struct kvm_vcpu *vcpu)
{
	vcpu->stat.exit_program_interruption++;
	trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
	return kvm_s390_inject_program_int(vcpu, vcpu->arch.sie_block->iprcc);
}

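/*
 * Combined instruction and program interruption intercept: handle the
 * instruction first, then deliver the program interruption.
 */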
static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
{
	int rc, rc2;

	vcpu->stat.exit_instr_and_program++;
	rc = handle_instruction(vcpu);
	rc2 = handle_prog(vcpu);

	if (rc == -EOPNOTSUPP)
		vcpu->arch.sie_block->icptcode = 0x04;
	if (rc)
		return rc;
	return rc2;
}

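/* Top-level handlers, indexed by the SIE intercept code divided by four */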
static const intercept_handler_t intercept_funcs[] = {
	[0x00 >> 2] = handle_noop,
	[0x04 >> 2] = handle_instruction,
	[0x08 >> 2] = handle_prog,
	[0x0C >> 2] = handle_instruction_and_prog,
	[0x10 >> 2] = handle_noop,
	[0x14 >> 2] = handle_noop,
	[0x18 >> 2] = handle_noop,
	[0x1C >> 2] = kvm_s390_handle_wait,
	[0x20 >> 2] = handle_validity,
	[0x28 >> 2] = handle_stop,
};

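/*
 * Dispatch a SIE intercept to the matching handler; unknown or
 * unhandled codes are reported as -EOPNOTSUPP.
 */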
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
	intercept_handler_t func;
	u8 code = vcpu->arch.sie_block->icptcode;

	if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs))
		return -EOPNOTSUPP;
	func = intercept_funcs[code >> 2];
	if (func)
		return func(vcpu);
	return -EOPNOTSUPP;
}