xref: /openbmc/linux/arch/s390/kvm/sigp.c (revision afb46f79)
/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

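/*
 * All SIGP order handlers below either return one of the architected
 * condition codes (SIGP_CC_ORDER_CODE_ACCEPTED == 0,
 * SIGP_CC_STATUS_STORED == 1, SIGP_CC_BUSY == 2,
 * SIGP_CC_NOT_OPERATIONAL == 3) or a negative error code that the
 * top-level handler must resolve, e.g. by exiting to userspace.
 *
 * __sigp_sense handles the SIGP SENSE order: CC 0 if the destination
 * VCPU has neither an external call pending nor is stopped, otherwise
 * the matching status bits are stored into the low word of the
 * caller's status register (*reg) and CC 1 is returned.
 *
 * For reference, a guest could trigger this path roughly as follows
 * (an illustrative sketch only, not code from this file; the register
 * choice and CPU address are made up):
 *
 *	lghi	%r4,0			# status register (R1 field)
 *	lghi	%r5,1			# destination CPU address (R3 field)
 *	sigp	%r4,%r5,SIGP_SENSE	# order code = 2nd-operand address
 *	# CC 0: accepted, CC 1: status in %r4, CC 2: busy, CC 3: not oper
 */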
static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
			u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu = NULL;
	int cpuflags;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;

	cpuflags = atomic_read(li->cpuflags);
	if (!(cpuflags & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED))) {
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	} else {
		*reg &= 0xffffffff00000000UL;
		if (cpuflags & CPUSTAT_ECALL_PEND)
			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		if (cpuflags & CPUSTAT_STOPPED)
			*reg |= SIGP_STATUS_STOPPED;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
	return rc;
}

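/*
 * __sigp_emergency handles the SIGP EMERGENCY SIGNAL order: queue a
 * KVM_S390_INT_EMERGENCY interrupt carrying the sender's CPU id on the
 * destination's local interrupt list, flag the pending external
 * interrupt in the cpuflags and wake the destination in case it is
 * sleeping in a wait state.
 */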
static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	struct kvm_vcpu *dst_vcpu = NULL;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EMERGENCY;
	inti->emerg.code = vcpu->vcpu_id;

	li = &dst_vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	spin_unlock_bh(&li->lock);
	VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);

	return SIGP_CC_ORDER_CODE_ACCEPTED;
}

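/*
 * __sigp_conditional_emergency handles SIGP CONDITIONAL EMERGENCY
 * SIGNAL.  The signal is only delivered if the destination is not
 * stopped, is disabled for I/O or external interrupts, waits with a
 * non-zero PSW address, or runs with the given ASN as its primary or
 * secondary ASN; otherwise SIGP_STATUS_INCORRECT_STATE is stored and
 * CC 1 returned.
 */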
static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
					u16 asn, u64 *reg)
{
	struct kvm_vcpu *dst_vcpu = NULL;
	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
	u16 p_asn, s_asn;
	psw_t *psw;
	u32 flags;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
	psw = &dst_vcpu->arch.sie_block->gpsw;
	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff;  /* Primary ASN */
	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff;  /* Secondary ASN */

	/* Deliver the emergency signal? */
	if (!(flags & CPUSTAT_STOPPED)
	    || (psw->mask & psw_int_mask) != psw_int_mask
	    || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
	    || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
		return __sigp_emergency(vcpu, cpu_addr);
	} else {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}
}

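/*
 * __sigp_external_call handles the SIGP EXTERNAL CALL order.  It
 * mirrors __sigp_emergency, but queues a KVM_S390_INT_EXTERNAL_CALL
 * interrupt instead.
 */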
static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	struct kvm_vcpu *dst_vcpu = NULL;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EXTERNAL_CALL;
	inti->extcall.code = vcpu->vcpu_id;

	li = &dst_vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	spin_unlock_bh(&li->lock);
	VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);

	return SIGP_CC_ORDER_CODE_ACCEPTED;
}

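/*
 * __inject_sigp_stop queues a stop interrupt and records the requested
 * action bits on the given local interrupt structure.  If the VCPU is
 * already stopped, nothing is queued; -ESHUTDOWN is returned for
 * stop-and-store so that the caller still performs the status store.
 * The interrupt block is allocated GFP_ATOMIC, presumably so the
 * helper stays usable from atomic callers.
 */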
static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
{
	struct kvm_s390_interrupt_info *inti;
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
	if (!inti)
		return -ENOMEM;
	inti->type = KVM_S390_SIGP_STOP;

	spin_lock_bh(&li->lock);
	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		kfree(inti);
		if ((action & ACTION_STORE_ON_STOP) != 0)
			rc = -ESHUTDOWN;
		goto out;
	}
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
	li->action_bits |= action;
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
out:
	spin_unlock_bh(&li->lock);

	return rc;
}

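/*
 * __sigp_stop handles the SIGP STOP and SIGP STOP AND STORE STATUS
 * orders.  If the destination was already stopped, a stop-and-store
 * still has to store the status; that is done here, after all
 * spinlocks have been dropped.
 */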
static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu = NULL;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;

	rc = __inject_sigp_stop(li, action);

	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);

	if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
		/*
		 * If the CPU has already been stopped, we still have
		 * to save the status when doing stop-and-store. This
		 * has to be done after unlocking all spinlocks.
		 */
		rc = kvm_s390_store_status_unloaded(dst_vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
	}

	return rc;
}

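/*
 * __sigp_set_arch handles the SIGP SET ARCHITECTURE order.  KVM only
 * runs z/Architecture guests, so a request for ESA/390 mode (parameter
 * 0) is answered with "not operational", while the z/Architecture mode
 * codes (1 and 2) are accepted after cancelling any pending async page
 * fault handling (the guest's pfault tokens are presumably no longer
 * valid across the switch).
 */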
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
	int rc;
	unsigned int i;
	struct kvm_vcpu *v;

	switch (parameter & 0xff) {
	case 0:
		rc = SIGP_CC_NOT_OPERATIONAL;
		break;
	case 1:
	case 2:
		kvm_for_each_vcpu(i, v, vcpu->kvm) {
			v->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
			kvm_clear_async_pf_completion_queue(v);
		}

		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
		break;
	default:
		rc = -EOPNOTSUPP;
		break;
	}
	return rc;
}

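/*
 * __sigp_set_prefix handles the SIGP SET PREFIX order.  The new prefix
 * is forced to 8k alignment and both pages of the prefix area must be
 * readable in guest absolute memory; the destination CPU must also be
 * stopped.  The prefix register itself is updated by the destination
 * VCPU when it processes the queued KVM_S390_SIGP_SET_PREFIX
 * interrupt.
 */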
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
			     u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu = NULL;
	struct kvm_s390_interrupt_info *inti;
	int rc;
	u8 tmp;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;

	/* make sure that the new value is valid memory */
	address = address & 0x7fffe000u;
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	    copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		return SIGP_CC_STATUS_STORED;
	}

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return SIGP_CC_BUSY;

	spin_lock_bh(&li->lock);
	/* cpu must be in stopped state */
	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		rc = SIGP_CC_STATUS_STORED;
		kfree(inti);
		goto out_li;
	}

	inti->type = KVM_S390_SIGP_SET_PREFIX;
	inti->prefix.address = address;

	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
	spin_unlock_bh(&li->lock);
	return rc;
}

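/*
 * __sigp_store_status_at_addr handles SIGP STORE STATUS AT ADDRESS.
 * The destination CPU must be stopped; its status is then written by
 * kvm_s390_store_status_unloaded() to the given 512-byte aligned
 * absolute address, and an access fault is reported back to the caller
 * as SIGP_STATUS_INVALID_PARAMETER with CC 1.
 */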
static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
				       u32 addr, u64 *reg)
{
	struct kvm_vcpu *dst_vcpu = NULL;
	int flags;
	int rc;

	if (cpu_id < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_id);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	spin_lock_bh(&dst_vcpu->arch.local_int.lock);
	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
	spin_unlock_bh(&dst_vcpu->arch.local_int.lock);
	if (!(flags & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	addr &= 0x7ffffe00;
	rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
	if (rc == -EFAULT) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		rc = SIGP_CC_STATUS_STORED;
	}
	return rc;
}

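/*
 * __sigp_sense_running handles the SIGP SENSE RUNNING STATUS order:
 * CC 0 if the destination VCPU is currently loaded on a host CPU,
 * otherwise SIGP_STATUS_NOT_RUNNING is stored and CC 1 returned.
 */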
static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
				u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu = NULL;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;
	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
		/* running */
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	} else {
		/* not running */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_NOT_RUNNING;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
		   rc);

	return rc;
}

/* Test whether the destination CPU is available and not busy */
static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_local_interrupt *li;
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	struct kvm_vcpu *dst_vcpu = NULL;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	if (li->action_bits & ACTION_STOP_ON_STOP)
		rc = SIGP_CC_BUSY;
	spin_unlock_bh(&li->lock);

	return rc;
}

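/*
 * kvm_s390_handle_sigp is the intercept handler for the SIGNAL
 * PROCESSOR instruction.  SIGP is an RS-format instruction: the R1
 * field names the status register, R3 names the register holding the
 * destination CPU address, and the second-operand address supplies the
 * order code.  The parameter is taken from the odd register of the
 * even-odd pair designated by R1, which is why an odd r1 reads
 * gprs[r1] and an even r1 reads gprs[r1 + 1].  Orders that have to be
 * completed in userspace (e.g. SIGP_START and SIGP_RESTART) return
 * -EOPNOTSUPP; for all other orders the resulting condition code is
 * written back to the guest PSW.
 */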
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp is privileged; inject an exception for problem-state guests */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	order_code = kvm_s390_get_base_disp_rs(vcpu);

	/* the parameter lives in the odd register of the r1 pair */
	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, cpu_addr,
				  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, cpu_addr);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, cpu_addr);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
						 ACTION_STOP_ON_STOP);
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter,
						 &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
				       &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_COND_EMERGENCY_SIGNAL:
		rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter,
						  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, cpu_addr,
					  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_START:
		rc = sigp_check_callable(vcpu, cpu_addr);
		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED)
			rc = -EOPNOTSUPP;    /* Handle START in user space */
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = sigp_check_callable(vcpu, cpu_addr);
		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) {
			VCPU_EVENT(vcpu, 4,
				   "sigp restart %x to handle userspace",
				   cpu_addr);
			/* user space must know about restart */
			rc = -EOPNOTSUPP;
		}
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (rc < 0)
		return rc;

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}
459