/*
 * interrupt.c - handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
#include "gaccess.h"

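/* external interrupts masked off in the guest PSW? */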
static int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

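/* all of PER, I/O and external interrupts masked off in the guest PSW? */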
static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
		return 0;
	return 1;
}

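/*
 * Check whether an interrupt can be delivered to the guest right now,
 * based on the guest PSW external mask and the corresponding subclass
 * mask bits in control register 0.
 */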
static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
			return 1;
		return 0;
	case KVM_S390_INT_SERVICE:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_SIGP_SET_PREFIX:
	case KVM_S390_RESTART:
		return 1;
	default:
		BUG();
	}
	return 0;
}

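/* mark the vcpu as waiting and flag it in the floating interrupt idle mask */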
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

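/* clear all interrupt related intercept requests for this vcpu */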
static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_ECALL_PEND |
		CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
		&vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

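/*
 * The interrupt cannot be delivered right now: request an intercept so
 * the host regains control once the guest enables delivery, either via
 * the external interrupt cpuflag or by trapping changes to CR0. A
 * pending SIGP stop requests the stop intercept instead.
 */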
static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR0;
		break;
	case KVM_S390_SIGP_STOP:
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
		break;
	default:
		BUG();
	}
}

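/*
 * Deliver one interrupt to the guest: store the interrupt code and
 * parameters in the guest lowcore, save the current PSW as the old PSW
 * and load the new PSW of the corresponding interrupt class.
 */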
static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
				   struct kvm_s390_interrupt_info *inti)
{
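	/*
	 * Instruction length in bytes, indexed by the two leftmost opcode
	 * bits of the intercepted instruction (taken from the ipa field).
	 */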
	const unsigned short table[] = { 2, 4, 4, 6 };
	int rc, exception = 0;

	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
		vcpu->stat.deliver_emergency_signal++;
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, inti->emerg.code);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_INT_SERVICE:
		VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
			   inti->ext.ext_params);
		vcpu->stat.deliver_service_signal++;
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_INT_VIRTIO:
		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, 0x0d00);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2,
				   inti->ext.ext_params2);
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_SIGP_STOP:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
		vcpu->stat.deliver_stop_signal++;
		__set_intercept_indicator(vcpu, inti);
		break;

	case KVM_S390_SIGP_SET_PREFIX:
		VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
			   inti->prefix.address);
		vcpu->stat.deliver_prefix_signal++;
		vcpu->arch.sie_block->prefix = inti->prefix.address;
		vcpu->arch.sie_block->ihcpu = 0xffff;
		break;

	case KVM_S390_RESTART:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
		vcpu->stat.deliver_restart_signal++;
		rc = copy_to_guest(vcpu, offsetof(struct _lowcore,
		  restart_old_psw), &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_PROGRAM_INT:
		VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
			   inti->pgm.code,
			   table[vcpu->arch.sie_block->ipa >> 14]);
		vcpu->stat.deliver_program_int++;
		rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_PGM_ILC,
			table[vcpu->arch.sie_block->ipa >> 14]);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
			 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_PGM_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	default:
		BUG();
	}
	if (exception) {
		printk("kvm: The guest lowcore is not mapped during interrupt "
			"delivery, killing userspace\n");
		do_exit(SIGKILL);
	}
}

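/*
 * Deliver a clock comparator external interrupt (code 0x1004) if the
 * guest has external interrupts and the clock comparator subclass
 * enabled; returns 1 when an interrupt was delivered.
 */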
static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
	int rc, exception = 0;

	if (psw_extint_disabled(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
	if (rc == -EFAULT)
		exception = 1;
	rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
		 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	if (rc == -EFAULT)
		exception = 1;
	rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
		__LC_EXT_NEW_PSW, sizeof(psw_t));
	if (rc == -EFAULT)
		exception = 1;
	if (exception) {
		printk("kvm: The guest lowcore is not mapped during interrupt "
			"delivery, killing userspace\n");
		do_exit(SIGKILL);
	}
	return 1;
}

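/*
 * Is any deliverable interrupt pending for this vcpu? Checks the local
 * list, then the floating list, then an already expired clock
 * comparator.
 */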
static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info  *inti;
	int rc = 0;

	if (atomic_read(&li->active)) {
		spin_lock_bh(&li->lock);
		list_for_each_entry(inti, &li->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock_bh(&li->lock);
	}

	if ((!rc) && atomic_read(&fi->active)) {
		spin_lock(&fi->lock);
		list_for_each_entry(inti, &fi->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock(&fi->lock);
	}

	if ((!rc) && (vcpu->arch.sie_block->ckc <
		get_clock() + vcpu->arch.sie_block->epoch)) {
		if ((!psw_extint_disabled(vcpu)) &&
			(vcpu->arch.sie_block->gcr[0] & 0x800ul))
			rc = 1;
	}

	return rc;
}

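/* the clock comparator is handled via the ckc hrtimer, so report none */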
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}

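/*
 * Handle an enabled wait: sleep until an interrupt becomes pending, a
 * signal arrives or the clock comparator expires. A disabled wait (all
 * interrupt classes masked) is not handled here and returns -EOPNOTSUPP.
 */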
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;
	DECLARE_WAITQUEUE(wait, current);

	vcpu->stat.exit_wait_state++;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;

	__set_cpu_idle(vcpu);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 0;
	spin_unlock_bh(&vcpu->arch.local_int.lock);

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		__unset_cpu_idle(vcpu);
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (psw_extint_disabled(vcpu) ||
	    (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		goto no_timer;
	}

	now = get_clock() + vcpu->arch.sie_block->epoch;
	if (vcpu->arch.sie_block->ckc < now) {
		__unset_cpu_idle(vcpu);
		return 0;
	}
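	/* TOD clock units to nanoseconds: delta * 1000/4096 = delta * 125/512 */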
	sltime = ((vcpu->arch.sie_block->ckc - now)*125)>>9;

	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime),
		      HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
	spin_lock(&vcpu->arch.local_int.float_int->lock);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	add_wait_queue(&vcpu->arch.local_int.wq, &wait);
	while (list_empty(&vcpu->arch.local_int.list) &&
		list_empty(&vcpu->arch.local_int.float_int->list) &&
		(!vcpu->arch.local_int.timer_due) &&
		!signal_pending(current)) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&vcpu->arch.local_int.lock);
		spin_unlock(&vcpu->arch.local_int.float_int->lock);
		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
		spin_lock(&vcpu->arch.local_int.float_int->lock);
		spin_lock_bh(&vcpu->arch.local_int.lock);
	}
	__unset_cpu_idle(vcpu);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
	spin_unlock_bh(&vcpu->arch.local_int.lock);
	spin_unlock(&vcpu->arch.local_int.float_int->lock);
	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
	return 0;
}

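/*
 * Tasklet scheduled by the hrtimer callback below: mark the timer as
 * due and wake up a vcpu sleeping in kvm_s390_handle_wait().
 */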
void kvm_s390_tasklet(unsigned long parm)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;

	spin_lock(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 1;
	if (waitqueue_active(&vcpu->arch.local_int.wq))
		wake_up_interruptible(&vcpu->arch.local_int.wq);
	spin_unlock(&vcpu->arch.local_int.lock);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

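/*
 * Deliver everything that is currently deliverable: local interrupts
 * first, then an expired clock comparator, then floating interrupts.
 * Interrupts that still cannot be delivered get intercept indicators
 * set so delivery can be retried once the guest enables them.
 */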
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info  *n, *inti = NULL;
	int deliver;

	__reset_intercept_indicators(vcpu);
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock_bh(&li->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}

	if ((vcpu->arch.sie_block->ckc <
		get_clock() + vcpu->arch.sie_block->epoch))
		__try_deliver_ckc_interrupt(vcpu);

	if (atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock(&fi->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}
}

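/*
 * Queue a program check for this vcpu from kernel context. Program
 * interrupts go to the head of the local list, ahead of other pending
 * local interrupts.
 */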
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_PROGRAM_INT;
	inti->pgm.code = code;

	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
	spin_lock_bh(&li->lock);
	list_add(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	BUG_ON(waitqueue_active(&li->wq));
	spin_unlock_bh(&li->lock);
	return 0;
}

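/*
 * Inject a floating interrupt (service signal or virtio notification)
 * into the VM: queue it on the floating list and kick an idle vcpu to
 * pick it up, falling back to round-robin if no vcpu is idle.
 */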
int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti;
	int sigcpu;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->type = s390int->type;
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
		inti->type = s390int->type;
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_INT_EMERGENCY:
	default:
		kfree(inti);
		return -EINVAL;
	}

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	list_add_tail(&inti->list, &fi->list);
	atomic_set(&fi->active, 1);
	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
	if (sigcpu == KVM_MAX_VCPUS) {
		do {
			sigcpu = fi->next_rr_cpu++;
			if (sigcpu == KVM_MAX_VCPUS)
				sigcpu = fi->next_rr_cpu = 0;
		} while (fi->local_int[sigcpu] == NULL);
	}
	li = fi->local_int[sigcpu];
	spin_lock_bh(&li->lock);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return 0;
}

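/*
 * Inject a cpu-local interrupt (program check, set prefix, stop,
 * restart or emergency signal) into a single vcpu and wake it up if it
 * is sleeping in an enabled wait.
 */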
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
			 struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		inti->type = s390int->type;
		inti->pgm.code = s390int->parm;
		VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		inti->prefix.address = s390int->parm;
		inti->type = s390int->type;
		VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_STOP:
	case KVM_S390_RESTART:
	case KVM_S390_INT_EMERGENCY:
		VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
		inti->type = s390int->type;
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	default:
		kfree(inti);
		return -EINVAL;
	}

	mutex_lock(&vcpu->kvm->lock);
	li = &vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	if (inti->type == KVM_S390_PROGRAM_INT)
		list_add(&inti->list, &li->list);
	else
		list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (inti->type == KVM_S390_SIGP_STOP)
		li->action_bits |= ACTION_STOP_ON_STOP;
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&vcpu->arch.local_int.wq);
	spin_unlock_bh(&li->lock);
	mutex_unlock(&vcpu->kvm->lock);
	return 0;
}
612