xref: /openbmc/linux/arch/s390/kvm/intercept.c (revision 05bcf503)
1 /*
2  * in-kernel handling for sie intercepts
3  *
4  * Copyright IBM Corp. 2008, 2009
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License (version 2 only)
8  * as published by the Free Software Foundation.
9  *
10  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11  *               Christian Borntraeger <borntraeger@de.ibm.com>
12  */
13 
14 #include <linux/kvm_host.h>
15 #include <linux/errno.h>
16 #include <linux/pagemap.h>
17 
18 #include <asm/kvm_host.h>
19 
20 #include "kvm-s390.h"
21 #include "gaccess.h"
22 #include "trace.h"
23 #include "trace-s390.h"
24 
25 static int handle_lctlg(struct kvm_vcpu *vcpu)
26 {
27 	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
28 	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
29 	int base2 = vcpu->arch.sie_block->ipb >> 28;
30 	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
31 			((vcpu->arch.sie_block->ipb & 0xff00) << 4);
32 	u64 useraddr;
33 	int reg, rc;
34 
35 	vcpu->stat.instruction_lctlg++;
36 	if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f)
37 		return -EOPNOTSUPP;
38 
39 	useraddr = disp2;
40 	if (base2)
41 		useraddr += vcpu->run->s.regs.gprs[base2];
42 
43 	if (useraddr & 7)
44 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
45 
46 	reg = reg1;
47 
48 	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
49 		   disp2);
50 	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);
51 
52 	do {
53 		rc = get_guest_u64(vcpu, useraddr,
54 				   &vcpu->arch.sie_block->gcr[reg]);
55 		if (rc == -EFAULT) {
56 			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
57 			break;
58 		}
59 		useraddr += 8;
60 		if (reg == reg3)
61 			break;
62 		reg = (reg + 1) % 16;
63 	} while (1);
64 	return 0;
65 }
66 
67 static int handle_lctl(struct kvm_vcpu *vcpu)
68 {
69 	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
70 	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
71 	int base2 = vcpu->arch.sie_block->ipb >> 28;
72 	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
73 	u64 useraddr;
74 	u32 val = 0;
75 	int reg, rc;
76 
77 	vcpu->stat.instruction_lctl++;
78 
79 	useraddr = disp2;
80 	if (base2)
81 		useraddr += vcpu->run->s.regs.gprs[base2];
82 
83 	if (useraddr & 3)
84 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
85 
86 	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
87 		   disp2);
88 	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);
89 
90 	reg = reg1;
91 	do {
92 		rc = get_guest_u32(vcpu, useraddr, &val);
93 		if (rc == -EFAULT) {
94 			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
95 			break;
96 		}
97 		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
98 		vcpu->arch.sie_block->gcr[reg] |= val;
99 		useraddr += 4;
100 		if (reg == reg3)
101 			break;
102 		reg = (reg + 1) % 16;
103 	} while (1);
104 	return 0;
105 }
106 
/*
 * Dispatch table for instruction intercepts, indexed by the major
 * opcode (high byte of the ipa field).  Unset slots fall through to
 * -EOPNOTSUPP in handle_instruction().
 */
static intercept_handler_t instruction_handlers[256] = {
	[0x01] = kvm_s390_handle_01,
	[0x83] = kvm_s390_handle_diag,
	[0xae] = kvm_s390_handle_sigp,
	[0xb2] = kvm_s390_handle_b2,
	[0xb7] = handle_lctl,
	[0xe5] = kvm_s390_handle_e5,
	[0xeb] = handle_lctlg,
};
116 
117 static int handle_noop(struct kvm_vcpu *vcpu)
118 {
119 	switch (vcpu->arch.sie_block->icptcode) {
120 	case 0x0:
121 		vcpu->stat.exit_null++;
122 		break;
123 	case 0x10:
124 		vcpu->stat.exit_external_request++;
125 		break;
126 	case 0x14:
127 		vcpu->stat.exit_external_interrupt++;
128 		break;
129 	default:
130 		break; /* nothing */
131 	}
132 	return 0;
133 }
134 
/*
 * Intercept handler for a guest CPU stop request.  Consumes the
 * pending action bits under the local interrupt lock and returns a
 * non-zero code where the main loop must not re-enter SIE.
 *
 * Note the ordering is deliberate: the lock must be dropped before
 * kvm_s390_vcpu_store_status() is called (see comment below), so the
 * two unlock paths must stay exactly as written.
 */
static int handle_stop(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	vcpu->stat.exit_stop_request++;
	spin_lock_bh(&vcpu->arch.local_int.lock);

	trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits);

	/* reload request: exit to the main loop so this vcpu is rerun */
	if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) {
		vcpu->arch.local_int.action_bits &= ~ACTION_RELOADVCPU_ON_STOP;
		rc = SIE_INTERCEPT_RERUNVCPU;
		vcpu->run->exit_reason = KVM_EXIT_INTR;
	}

	/* stop request: flag the vcpu as stopped and bail to userspace */
	if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
		atomic_set_mask(CPUSTAT_STOPPED,
				&vcpu->arch.sie_block->cpuflags);
		vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
		VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
		rc = -EOPNOTSUPP;
	}

	if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
		vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
		/* store status must be called unlocked. Since local_int.lock
		 * only protects local_int.* and not guest memory we can give
		 * up the lock here */
		spin_unlock_bh(&vcpu->arch.local_int.lock);
		rc = kvm_s390_vcpu_store_status(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		/* a successful store still must not resume the guest */
		if (rc >= 0)
			rc = -EOPNOTSUPP;
	} else
		spin_unlock_bh(&vcpu->arch.local_int.lock);
	return rc;
}
172 
173 static int handle_validity(struct kvm_vcpu *vcpu)
174 {
175 	unsigned long vmaddr;
176 	int viwhy = vcpu->arch.sie_block->ipb >> 16;
177 	int rc;
178 
179 	vcpu->stat.exit_validity++;
180 	trace_kvm_s390_intercept_validity(vcpu, viwhy);
181 	if (viwhy == 0x37) {
182 		vmaddr = gmap_fault(vcpu->arch.sie_block->prefix,
183 				    vcpu->arch.gmap);
184 		if (IS_ERR_VALUE(vmaddr)) {
185 			rc = -EOPNOTSUPP;
186 			goto out;
187 		}
188 		rc = fault_in_pages_writeable((char __user *) vmaddr,
189 			 PAGE_SIZE);
190 		if (rc) {
191 			/* user will receive sigsegv, exit to user */
192 			rc = -EOPNOTSUPP;
193 			goto out;
194 		}
195 		vmaddr = gmap_fault(vcpu->arch.sie_block->prefix + PAGE_SIZE,
196 				    vcpu->arch.gmap);
197 		if (IS_ERR_VALUE(vmaddr)) {
198 			rc = -EOPNOTSUPP;
199 			goto out;
200 		}
201 		rc = fault_in_pages_writeable((char __user *) vmaddr,
202 			 PAGE_SIZE);
203 		if (rc) {
204 			/* user will receive sigsegv, exit to user */
205 			rc = -EOPNOTSUPP;
206 			goto out;
207 		}
208 	} else
209 		rc = -EOPNOTSUPP;
210 
211 out:
212 	if (rc)
213 		VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
214 			   viwhy);
215 	return rc;
216 }
217 
218 static int handle_instruction(struct kvm_vcpu *vcpu)
219 {
220 	intercept_handler_t handler;
221 
222 	vcpu->stat.exit_instruction++;
223 	trace_kvm_s390_intercept_instruction(vcpu,
224 					     vcpu->arch.sie_block->ipa,
225 					     vcpu->arch.sie_block->ipb);
226 	handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
227 	if (handler)
228 		return handler(vcpu);
229 	return -EOPNOTSUPP;
230 }
231 
/*
 * Intercept handler for program interruptions: re-inject the
 * interruption (code taken from the iprcc field) into the guest.
 */
static int handle_prog(struct kvm_vcpu *vcpu)
{
	vcpu->stat.exit_program_interruption++;
	trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
	return kvm_s390_inject_program_int(vcpu, vcpu->arch.sie_block->iprcc);
}
238 
239 static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
240 {
241 	int rc, rc2;
242 
243 	vcpu->stat.exit_instr_and_program++;
244 	rc = handle_instruction(vcpu);
245 	rc2 = handle_prog(vcpu);
246 
247 	if (rc == -EOPNOTSUPP)
248 		vcpu->arch.sie_block->icptcode = 0x04;
249 	if (rc)
250 		return rc;
251 	return rc2;
252 }
253 
/*
 * Top-level dispatch table, indexed by intercept code / 4 (codes are
 * always multiples of four).  Unset slots yield -EOPNOTSUPP in
 * kvm_handle_sie_intercept().
 */
static const intercept_handler_t intercept_funcs[] = {
	[0x00 >> 2] = handle_noop,
	[0x04 >> 2] = handle_instruction,
	[0x08 >> 2] = handle_prog,
	[0x0C >> 2] = handle_instruction_and_prog,
	[0x10 >> 2] = handle_noop,
	[0x14 >> 2] = handle_noop,
	[0x1C >> 2] = kvm_s390_handle_wait,
	[0x20 >> 2] = handle_validity,
	[0x28 >> 2] = handle_stop,
};
265 
266 int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
267 {
268 	intercept_handler_t func;
269 	u8 code = vcpu->arch.sie_block->icptcode;
270 
271 	if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs))
272 		return -EOPNOTSUPP;
273 	func = intercept_funcs[code >> 2];
274 	if (func)
275 		return func(vcpu);
276 	return -EOPNOTSUPP;
277 }
278