/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

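/*
 * SIGP SENSE: if the destination VCPU neither has an external call
 * pending nor is stopped, the order is accepted without status;
 * otherwise the matching status bits are merged into the low word of
 * the caller's status register and CC "status stored" is returned.
 */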
static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
			u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	int cpuflags;
	int rc;

	li = &dst_vcpu->arch.local_int;

	cpuflags = atomic_read(li->cpuflags);
	if (!(cpuflags & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	else {
		*reg &= 0xffffffff00000000UL;
		if (cpuflags & CPUSTAT_ECALL_PEND)
			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		if (cpuflags & CPUSTAT_STOPPED)
			*reg |= SIGP_STATUS_STOPPED;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id,
		   rc);
	return rc;
}

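/*
 * Inject an EMERGENCY SIGNAL external interrupt, tagged with the
 * sending CPU's address, into the destination VCPU.
 */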
static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				    struct kvm_vcpu *dst_vcpu)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_INT_EMERGENCY,
		.u.emerg.code = vcpu->vcpu_id,
	};
	int rc = 0;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (!rc)
		VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x",
			   dst_vcpu->vcpu_id);

	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}

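/* SIGP EMERGENCY SIGNAL: the unconditional variant of the order. */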
static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
	return __inject_sigp_emergency(vcpu, dst_vcpu);
}

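/*
 * SIGP CONDITIONAL EMERGENCY SIGNAL: deliver the emergency signal only
 * if the destination's state requires it (not stopped, not enabled for
 * both I/O and external interrupts, waiting with a nonzero PSW address,
 * or running with the given ASN as its primary or secondary ASN);
 * otherwise store "incorrect state".
 */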
static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
					struct kvm_vcpu *dst_vcpu,
					u16 asn, u64 *reg)
{
	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
	u16 p_asn, s_asn;
	psw_t *psw;
	u32 flags;

	flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
	psw = &dst_vcpu->arch.sie_block->gpsw;
	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff;  /* Primary ASN */
	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff;  /* Secondary ASN */

	/* Inject the emergency signal? */
	if (!(flags & CPUSTAT_STOPPED)
	    || (psw->mask & psw_int_mask) != psw_int_mask
	    || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
	    || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
		return __inject_sigp_emergency(vcpu, dst_vcpu);
	} else {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}
}

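/*
 * SIGP EXTERNAL CALL: inject an external-call interrupt, tagged with
 * the sending CPU's address, into the destination VCPU.
 */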
static int __sigp_external_call(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *dst_vcpu)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_INT_EXTERNAL_CALL,
		.u.extcall.code = vcpu->vcpu_id,
	};
	int rc;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (!rc)
		VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
			   dst_vcpu->vcpu_id);

	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}

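/*
 * Queue a stop request for the destination VCPU. Returns BUSY while an
 * earlier stop is still pending, and -ESHUTDOWN if the CPU is already
 * stopped but the caller still has to store its status.
 */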
static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
{
	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	spin_lock(&li->lock);
	if (li->action_bits & ACTION_STOP_ON_STOP) {
		/* another SIGP STOP is pending */
		rc = SIGP_CC_BUSY;
		goto out;
	}
	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		if ((action & ACTION_STORE_ON_STOP) != 0)
			rc = -ESHUTDOWN;
		goto out;
	}
	set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
	li->action_bits |= action;
	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
	kvm_s390_vcpu_wakeup(dst_vcpu);
out:
	spin_unlock(&li->lock);

	return rc;
}

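/* SIGP STOP: stop the destination VCPU without storing its status. */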
static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
	int rc;

	rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP);
	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", dst_vcpu->vcpu_id);

	return rc;
}

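/*
 * SIGP STOP AND STORE STATUS: like SIGP STOP, but the CPU status is
 * stored as well. For an already-stopped target the store is performed
 * right here, after the interrupt lock has been dropped.
 */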
static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
					struct kvm_vcpu *dst_vcpu, u64 *reg)
{
	int rc;

	rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP |
					      ACTION_STORE_ON_STOP);
	VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
		   dst_vcpu->vcpu_id);

	if (rc == -ESHUTDOWN) {
		/* If the CPU has already been stopped, we still have
		 * to save the status when doing stop-and-store. This
		 * has to be done after unlocking all spinlocks. */
		rc = kvm_s390_store_status_unloaded(dst_vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
	}

	return rc;
}

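/*
 * SIGP SET ARCHITECTURE: a switch to mode 0 is rejected as not
 * operational; modes 1 and 2 are accepted, invalidating the pfault
 * tokens of all VCPUs and flushing their async page fault queues.
 */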
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
	int rc;
	unsigned int i;
	struct kvm_vcpu *v;

	switch (parameter & 0xff) {
	case 0:
		rc = SIGP_CC_NOT_OPERATIONAL;
		break;
	case 1:
	case 2:
		kvm_for_each_vcpu(i, v, vcpu->kvm) {
			v->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
			kvm_clear_async_pf_completion_queue(v);
		}

		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	return rc;
}

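/*
 * SIGP SET PREFIX: check that the 8k-aligned prefix address denotes
 * backed guest memory, then queue a set-prefix request for the
 * destination VCPU, which must be in the stopped state.
 */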
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
			     u32 address, u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	int rc;

	li = &dst_vcpu->arch.local_int;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	address &= 0x7fffe000u;
	if (kvm_is_error_gpa(vcpu->kvm, address)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		return SIGP_CC_STATUS_STORED;
	}

	spin_lock(&li->lock);
	/* cpu must be in stopped state */
	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		rc = SIGP_CC_STATUS_STORED;
		goto out_li;
	}

	li->irq.prefix.address = address;
	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	kvm_s390_vcpu_wakeup(dst_vcpu);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", dst_vcpu->vcpu_id,
		   address);
out_li:
	spin_unlock(&li->lock);
	return rc;
}

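/*
 * SIGP STORE STATUS AT ADDRESS: store the status of the stopped
 * destination VCPU at the given 512-byte aligned absolute address.
 */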
static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
				       struct kvm_vcpu *dst_vcpu,
				       u32 addr, u64 *reg)
{
	int flags;
	int rc;

	spin_lock(&dst_vcpu->arch.local_int.lock);
	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
	spin_unlock(&dst_vcpu->arch.local_int.lock);
	if (!(flags & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	addr &= 0x7ffffe00;
	rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
	if (rc == -EFAULT) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		rc = SIGP_CC_STATUS_STORED;
	}
	return rc;
}

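/*
 * SIGP SENSE RUNNING: accept the order if the destination VCPU is
 * currently running, otherwise store the "not running" status.
 */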
static int __sigp_sense_running(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *dst_vcpu, u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	int rc;

	li = &dst_vcpu->arch.local_int;
	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
		/* running */
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	} else {
		/* not running */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_NOT_RUNNING;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x",
		   dst_vcpu->vcpu_id, rc);

	return rc;
}

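/*
 * START and RESTART are forwarded to user space, but a stop request
 * that is still pending on the destination must be reported as BUSY.
 */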
static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu *dst_vcpu, u8 order_code)
{
	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
	/* handle (RE)START in user space */
	int rc = -EOPNOTSUPP;

	spin_lock(&li->lock);
	if (li->action_bits & ACTION_STOP_ON_STOP)
		rc = SIGP_CC_BUSY;
	spin_unlock(&li->lock);

	return rc;
}

static int __prepare_sigp_cpu_reset(struct kvm_vcpu *vcpu,
				    struct kvm_vcpu *dst_vcpu, u8 order_code)
{
	/* handle (INITIAL) CPU RESET in user space */
	return -EOPNOTSUPP;
}

static int __prepare_sigp_unknown(struct kvm_vcpu *vcpu,
				  struct kvm_vcpu *dst_vcpu)
{
	/* handle unknown orders in user space */
	return -EOPNOTSUPP;
}

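/*
 * Dispatch a SIGP order that addresses a single destination CPU: look
 * up the target VCPU, bump the per-order statistics counter and invoke
 * the matching handler. Orders that must be completed in user space
 * return -EOPNOTSUPP.
 */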
static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
			   u16 cpu_addr, u32 parameter, u64 *status_reg)
{
	int rc;
	struct kvm_vcpu *dst_vcpu;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, dst_vcpu);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, dst_vcpu);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, dst_vcpu);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop_store_status++;
		rc = __sigp_stop_and_store_status(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		vcpu->stat.instruction_sigp_store_status++;
		rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter,
						 status_reg);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg);
		break;
	case SIGP_COND_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_cond_emergency++;
		rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter,
						  status_reg);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_START:
		vcpu->stat.instruction_sigp_start++;
		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_INITIAL_CPU_RESET:
		vcpu->stat.instruction_sigp_init_cpu_reset++;
		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_CPU_RESET:
		vcpu->stat.instruction_sigp_cpu_reset++;
		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
		break;
	default:
		vcpu->stat.instruction_sigp_unknown++;
		rc = __prepare_sigp_unknown(vcpu, dst_vcpu);
	}

	if (rc == -EOPNOTSUPP)
		VCPU_EVENT(vcpu, 4,
			   "sigp order %u -> cpu %x: handled in user space",
			   order_code, dst_vcpu->vcpu_id);

	return rc;
}

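/*
 * SIGP is an RS-format instruction (SIGP r1,r3,D2(B2)): the CPU address
 * of the target comes from r3, the order code from the second-operand
 * address, and the parameter from the odd register of the r1 pair, i.e.
 * r1 itself if r1 is odd, r1 + 1 otherwise. Status, if any, is stored
 * back into r1.
 */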
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	order_code = kvm_s390_get_base_disp_rs(vcpu);

	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
	switch (order_code) {
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	default:
		rc = handle_sigp_dst(vcpu, order_code, cpu_addr,
				     parameter,
				     &vcpu->run->s.regs.gprs[r1]);
	}

	if (rc < 0)
		return rc;

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}

/*
 * Handle SIGP partial execution interception.
 *
 * This interception will occur at the source cpu when a source cpu sends an
 * external call to a target cpu and the target cpu has the WAIT bit set in
 * its cpuflags. Interception will occur after the interrupt indicator bits at
 * the target cpu have been set. All error cases will lead to instruction
 * interception, therefore nothing is to be checked or prepared.
 */
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
{
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	struct kvm_vcpu *dest_vcpu;
	u8 order_code = kvm_s390_get_base_disp_rs(vcpu);

	trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);

	if (order_code == SIGP_EXTERNAL_CALL) {
		dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
		BUG_ON(dest_vcpu == NULL);

		kvm_s390_vcpu_wakeup(dest_vcpu);
		kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
		return 0;
	}

	return -EOPNOTSUPP;
}