xref: /openbmc/linux/arch/s390/kvm/interrupt.c (revision b34e08d5)
/*
 * Handling of KVM guest interrupts
 *
 * Copyright IBM Corp. 2008, 2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define IOINT_SCHID_MASK 0x0000ffff
#define IOINT_SSID_MASK 0x00030000
#define IOINT_CSSID_MASK 0x03fc0000
#define IOINT_AI_MASK 0x04000000

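/*
 * All non-I/O interrupt types (program, SIGP, machine check, external,
 * ...) are encoded in the 0xfffexxxx/0xffffxxxx range, while I/O
 * interrupt types are built from the subchannel identification and thus
 * never have those high-order bits set.
 */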
static int is_ioint(u64 type)
{
	return ((type & 0xfffe0000u) != 0xfffe0000u);
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
		return 0;
	return 1;
}

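/*
 * Bits 2-4 of the interruption word hold the interruption subclass
 * (ISC, 0-7).  Convert it to the corresponding single-bit mask as laid
 * out in the I/O interruption subclass mask of CR6: ISC 0 yields
 * 0x80000000, ISC 7 yields 0x01000000.
 */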
static u64 int_word_to_isc_bits(u32 int_word)
{
	u8 isc = (int_word & 0x38000000) >> 27;

	return (0x80 >> isc) << 24;
}

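/*
 * Check whether an interrupt can be delivered right now: the guest PSW
 * must have the corresponding interrupt class enabled and, where
 * applicable, the matching subclass mask bit must be set in the
 * relevant control register (CR0 for externals, CR6 for I/O, CR14 for
 * machine checks).
 */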
static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
			return 1;
		return 0;
	case KVM_S390_INT_EMERGENCY:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
			return 1;
		return 0;
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_SIGP_SET_PREFIX:
	case KVM_S390_RESTART:
		return 1;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
			return 1;
		return 0;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[6] &
		    int_word_to_isc_bits(inti->io.io_int_word))
			return 1;
		return 0;
	default:
		printk(KERN_WARNING "illegal interrupt type %llx\n",
		       inti->type);
		BUG();
	}
	return 0;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_ECALL_PEND |
		CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
		&vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~ICTL_LPSW;
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR0;
		break;
	case KVM_S390_SIGP_STOP:
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
		break;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			vcpu->arch.sie_block->ictl |= ICTL_LPSW;
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR14;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_IO_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR6;
		break;
	default:
		BUG();
	}
}

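/*
 * Deliver one interrupt by writing its interruption parameters into the
 * guest lowcore and swapping the old/new PSW pair of the respective
 * interruption class.  table[] maps the two leftmost bits of the
 * interrupted instruction (ipa >> 14) to the instruction length in
 * bytes, which is used as the instruction-length code for program
 * interrupts.
 */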
static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
				   struct kvm_s390_interrupt_info *inti)
{
	const unsigned short table[] = { 2, 4, 4, 6 };
	int rc = 0;

	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
		vcpu->stat.deliver_emergency_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->emerg.code, 0);
		rc  = put_guest(vcpu, 0x1201, (u16 __user *)__LC_EXT_INT_CODE);
		rc |= put_guest(vcpu, inti->emerg.code,
				(u16 __user *)__LC_EXT_CPU_ADDR);
		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_EXT_NEW_PSW, sizeof(psw_t));
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
		vcpu->stat.deliver_external_call++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->extcall.code, 0);
		rc  = put_guest(vcpu, 0x1202, (u16 __user *)__LC_EXT_INT_CODE);
		rc |= put_guest(vcpu, inti->extcall.code,
				(u16 __user *)__LC_EXT_CPU_ADDR);
		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_EXT_NEW_PSW, sizeof(psw_t));
		break;
	case KVM_S390_INT_SERVICE:
		VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
			   inti->ext.ext_params);
		vcpu->stat.deliver_service_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->ext.ext_params, 0);
		rc  = put_guest(vcpu, 0x2401, (u16 __user *)__LC_EXT_INT_CODE);
		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_EXT_NEW_PSW, sizeof(psw_t));
		rc |= put_guest(vcpu, inti->ext.ext_params,
				(u32 __user *)__LC_EXT_PARAMS);
		break;
	case KVM_S390_INT_PFAULT_INIT:
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
						 inti->ext.ext_params2);
		rc  = put_guest(vcpu, 0x2603, (u16 __user *) __LC_EXT_INT_CODE);
		rc |= put_guest(vcpu, 0x0600, (u16 __user *) __LC_EXT_CPU_ADDR);
		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_EXT_NEW_PSW, sizeof(psw_t));
		rc |= put_guest(vcpu, inti->ext.ext_params2,
				(u64 __user *) __LC_EXT_PARAMS2);
		break;
	case KVM_S390_INT_PFAULT_DONE:
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
						 inti->ext.ext_params2);
		rc  = put_guest(vcpu, 0x2603, (u16 __user *) __LC_EXT_INT_CODE);
		rc |= put_guest(vcpu, 0x0680, (u16 __user *) __LC_EXT_CPU_ADDR);
		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_EXT_NEW_PSW, sizeof(psw_t));
		rc |= put_guest(vcpu, inti->ext.ext_params2,
				(u64 __user *) __LC_EXT_PARAMS2);
		break;
	case KVM_S390_INT_VIRTIO:
		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->ext.ext_params,
						 inti->ext.ext_params2);
		rc  = put_guest(vcpu, 0x2603, (u16 __user *)__LC_EXT_INT_CODE);
		rc |= put_guest(vcpu, 0x0d00, (u16 __user *)__LC_EXT_CPU_ADDR);
		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_EXT_NEW_PSW, sizeof(psw_t));
		rc |= put_guest(vcpu, inti->ext.ext_params,
				(u32 __user *)__LC_EXT_PARAMS);
		rc |= put_guest(vcpu, inti->ext.ext_params2,
				(u64 __user *)__LC_EXT_PARAMS2);
		break;
	case KVM_S390_SIGP_STOP:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
		vcpu->stat.deliver_stop_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 0, 0);
		__set_intercept_indicator(vcpu, inti);
		break;

	case KVM_S390_SIGP_SET_PREFIX:
		VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
			   inti->prefix.address);
		vcpu->stat.deliver_prefix_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->prefix.address, 0);
		kvm_s390_set_prefix(vcpu, inti->prefix.address);
		break;

	case KVM_S390_RESTART:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
		vcpu->stat.deliver_restart_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 0, 0);
		rc  = copy_to_guest(vcpu,
				    offsetof(struct _lowcore, restart_old_psw),
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      offsetof(struct _lowcore, restart_psw),
				      sizeof(psw_t));
		atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
		break;
	case KVM_S390_PROGRAM_INT:
		VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
			   inti->pgm.code,
			   table[vcpu->arch.sie_block->ipa >> 14]);
		vcpu->stat.deliver_program_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->pgm.code, 0);
		rc  = put_guest(vcpu, inti->pgm.code, (u16 __user *)__LC_PGM_INT_CODE);
		rc |= put_guest(vcpu, table[vcpu->arch.sie_block->ipa >> 14],
				(u16 __user *)__LC_PGM_ILC);
		rc |= copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_PGM_NEW_PSW, sizeof(psw_t));
		break;

	case KVM_S390_MCHK:
		VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
			   inti->mchk.mcic);
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->mchk.cr14,
						 inti->mchk.mcic);
		rc  = kvm_s390_vcpu_store_status(vcpu,
						 KVM_S390_STORE_STATUS_PREFIXED);
		rc |= put_guest(vcpu, inti->mchk.mcic, (u64 __user *) __LC_MCCK_CODE);
		rc |= copy_to_guest(vcpu, __LC_MCK_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_MCK_NEW_PSW, sizeof(psw_t));
		break;

	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	{
		__u32 param0 = ((__u32)inti->io.subchannel_id << 16) |
			inti->io.subchannel_nr;
		__u64 param1 = ((__u64)inti->io.io_int_parm << 32) |
			inti->io.io_int_word;
		VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
		vcpu->stat.deliver_io_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 param0, param1);
		rc  = put_guest(vcpu, inti->io.subchannel_id,
				(u16 __user *) __LC_SUBCHANNEL_ID);
		rc |= put_guest(vcpu, inti->io.subchannel_nr,
				(u16 __user *) __LC_SUBCHANNEL_NR);
		rc |= put_guest(vcpu, inti->io.io_int_parm,
				(u32 __user *) __LC_IO_INT_PARM);
		rc |= put_guest(vcpu, inti->io.io_int_word,
				(u32 __user *) __LC_IO_INT_WORD);
		rc |= copy_to_guest(vcpu, __LC_IO_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_IO_NEW_PSW, sizeof(psw_t));
		break;
	}
	default:
		BUG();
	}
	if (rc) {
		printk("kvm: The guest lowcore is not mapped during interrupt "
		       "delivery, killing userspace\n");
		do_exit(SIGKILL);
	}
}

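/*
 * Deliver a clock comparator external interrupt (code 0x1004) if
 * external interrupts are enabled in the PSW and the clock comparator
 * subclass mask (bit 0x800 in CR0) is set.  Returns 1 if an interrupt
 * was delivered, 0 otherwise.
 */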
static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
	int rc;

	if (psw_extint_disabled(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	rc  = put_guest(vcpu, 0x1004, (u16 __user *)__LC_EXT_INT_CODE);
	rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			      __LC_EXT_NEW_PSW, sizeof(psw_t));
	if (rc) {
		printk("kvm: The guest lowcore is not mapped during interrupt "
			"delivery, killing userspace\n");
		do_exit(SIGKILL);
	}
	return 1;
}

int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info  *inti;
	int rc = 0;

	if (atomic_read(&li->active)) {
		spin_lock_bh(&li->lock);
		list_for_each_entry(inti, &li->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock_bh(&li->lock);
	}

	if ((!rc) && atomic_read(&fi->active)) {
		spin_lock(&fi->lock);
		list_for_each_entry(inti, &fi->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock(&fi->lock);
	}

	if ((!rc) && (vcpu->arch.sie_block->ckc <
		get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) {
		if ((!psw_extint_disabled(vcpu)) &&
			(vcpu->arch.sie_block->gcr[0] & 0x800ul))
			rc = 1;
	}

	return rc;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}

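/*
 * Handle an enabled wait: put the vcpu to sleep until an interrupt
 * becomes pending, a signal arrives, or the clock comparator fires.
 * If the comparator is armed, an hrtimer bounds the sleep by the
 * remaining time; a PSW with all interrupt classes disabled would never
 * wake up and is rejected as a disabled wait.
 */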
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;
	DECLARE_WAITQUEUE(wait, current);

	vcpu->stat.exit_wait_state++;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;

	__set_cpu_idle(vcpu);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 0;
	spin_unlock_bh(&vcpu->arch.local_int.lock);

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		__unset_cpu_idle(vcpu);
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (psw_extint_disabled(vcpu) ||
	    (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		goto no_timer;
	}

	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
	if (vcpu->arch.sie_block->ckc < now) {
		__unset_cpu_idle(vcpu);
		return 0;
	}

	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	spin_lock(&vcpu->arch.local_int.float_int->lock);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	add_wait_queue(&vcpu->wq, &wait);
	while (list_empty(&vcpu->arch.local_int.list) &&
		list_empty(&vcpu->arch.local_int.float_int->list) &&
		(!vcpu->arch.local_int.timer_due) &&
		!signal_pending(current)) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&vcpu->arch.local_int.lock);
		spin_unlock(&vcpu->arch.local_int.float_int->lock);
		schedule();
		spin_lock(&vcpu->arch.local_int.float_int->lock);
		spin_lock_bh(&vcpu->arch.local_int.lock);
	}
	__unset_cpu_idle(vcpu);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->wq, &wait);
	spin_unlock_bh(&vcpu->arch.local_int.lock);
	spin_unlock(&vcpu->arch.local_int.float_int->lock);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
	return 0;
}

void kvm_s390_tasklet(unsigned long parm)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;

	spin_lock(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 1;
	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);
	spin_unlock(&vcpu->arch.local_int.lock);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	vcpu->preempted = true;
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info  *n, *inti = NULL;

	spin_lock_bh(&li->lock);
	list_for_each_entry_safe(inti, n, &li->list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
	atomic_set(&li->active, 0);
	spin_unlock_bh(&li->lock);
}

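/*
 * Walk the local and floating interrupt lists and deliver every
 * interrupt that is currently deliverable; for the rest, set the
 * intercept indicators so that SIE exits once the guest enables the
 * corresponding interrupt class.
 */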
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info  *n, *inti = NULL;
	int deliver;

	__reset_intercept_indicators(vcpu);
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock_bh(&li->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}

	if ((vcpu->arch.sie_block->ckc <
		get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
		__try_deliver_ckc_interrupt(vcpu);

	if (atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					fi->irq_count--;
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock(&fi->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}
}

void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info  *n, *inti = NULL;
	int deliver;

	__reset_intercept_indicators(vcpu);
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if ((inti->type == KVM_S390_MCHK) &&
				    __interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock_bh(&li->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}

	if (atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if ((inti->type == KVM_S390_MCHK) &&
				    __interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					fi->irq_count--;
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock(&fi->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}
}

int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_PROGRAM_INT;
	inti->pgm.code = code;

	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
	spin_lock_bh(&li->lock);
	list_add(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	BUG_ON(waitqueue_active(li->wq));
	spin_unlock_bh(&li->lock);
	return 0;
}

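/*
 * Dequeue the first pending floating I/O interrupt that matches either
 * one of the ISC mask bits in cr6 or the given subchannel id/nr pair.
 * Exactly one of cr6 and schid must be non-zero.  The caller becomes
 * responsible for freeing a returned interrupt.
 */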
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 cr6, u64 schid)
{
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti, *iter;

	if ((!schid && !cr6) || (schid && cr6))
		return NULL;
	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	inti = NULL;
	list_for_each_entry(iter, &fi->list, list) {
		if (!is_ioint(iter->type))
			continue;
		if (cr6 &&
		    ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
			continue;
		if (schid) {
			if (((schid & 0x00000000ffff0000) >> 16) !=
			    iter->io.subchannel_id)
				continue;
			if ((schid & 0x000000000000ffff) !=
			    iter->io.subchannel_nr)
				continue;
		}
		inti = iter;
		break;
	}
	if (inti) {
		list_del_init(&inti->list);
		fi->irq_count--;
	}
	if (list_empty(&fi->list))
		atomic_set(&fi->active, 0);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return inti;
}

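/*
 * Enqueue a floating interrupt (I/O interrupts are kept sorted by ISC),
 * then kick a vcpu: preferably an idle one, otherwise the next one in a
 * simple round-robin order.
 */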
static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *iter;
	struct kvm_vcpu *dst_vcpu = NULL;
	int sigcpu;
	int rc = 0;

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
		rc = -EINVAL;
		goto unlock_fi;
	}
	fi->irq_count++;
	if (!is_ioint(inti->type)) {
		list_add_tail(&inti->list, &fi->list);
	} else {
		u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);

		/* Keep I/O interrupts sorted in isc order. */
		list_for_each_entry(iter, &fi->list, list) {
			if (!is_ioint(iter->type))
				continue;
			if (int_word_to_isc_bits(iter->io.io_int_word)
			    <= isc_bits)
				continue;
			break;
		}
		list_add_tail(&inti->list, &iter->list);
	}
	atomic_set(&fi->active, 1);
	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
	if (sigcpu == KVM_MAX_VCPUS) {
		do {
			sigcpu = fi->next_rr_cpu++;
			if (sigcpu == KVM_MAX_VCPUS)
				sigcpu = fi->next_rr_cpu = 0;
		} while (kvm_get_vcpu(kvm, sigcpu) == NULL);
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
	li = &dst_vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	kvm_get_vcpu(kvm, sigcpu)->preempted = true;
	spin_unlock_bh(&li->lock);
unlock_fi:
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return rc;
}

int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = s390int->type;
	switch (inti->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_INT_PFAULT_DONE:
		inti->type = s390int->type;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_MCHK:
		VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
			 s390int->parm64);
		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (inti->type & IOINT_AI_MASK)
			VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
		else
			VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
				 s390int->type & IOINT_CSSID_MASK,
				 s390int->type & IOINT_SSID_MASK,
				 s390int->type & IOINT_SCHID_MASK);
		inti->io.subchannel_id = s390int->parm >> 16;
		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
		inti->io.io_int_parm = s390int->parm64 >> 32;
		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
		break;
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);

	return __inject_vm(kvm, inti);
}

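/*
 * Illustrative sketch only (not part of the original file): a caller
 * could inject a floating service-signal interrupt as shown below,
 * with "parm" carrying the external interruption parameter.  The
 * helper name is hypothetical.
 */
static int __maybe_unused example_inject_service(struct kvm *kvm, u32 parm)
{
	struct kvm_s390_interrupt s390int = {
		.type = KVM_S390_INT_SERVICE,
		.parm = parm,
	};

	return kvm_s390_inject_vm(kvm, &s390int);
}
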
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
			 struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		inti->type = s390int->type;
		inti->pgm.code = s390int->parm;
		VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		inti->prefix.address = s390int->parm;
		inti->type = s390int->type;
		VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_STOP:
	case KVM_S390_RESTART:
		VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
		inti->type = s390int->type;
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
			   s390int->parm);
		inti->type = s390int->type;
		inti->extcall.code = s390int->parm;
		break;
	case KVM_S390_INT_EMERGENCY:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", s390int->parm);
		inti->type = s390int->type;
		inti->emerg.code = s390int->parm;
		break;
	case KVM_S390_MCHK:
		VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
			   s390int->parm64);
		inti->type = s390int->type;
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_PFAULT_INIT:
		inti->type = s390int->type;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type, s390int->parm,
				   s390int->parm64, 2);

	mutex_lock(&vcpu->kvm->lock);
	li = &vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	if (inti->type == KVM_S390_PROGRAM_INT)
		list_add(&inti->list, &li->list);
	else
		list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (inti->type == KVM_S390_SIGP_STOP)
		li->action_bits |= ACTION_STOP_ON_STOP;
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);
	vcpu->preempted = true;
	spin_unlock_bh(&li->lock);
	mutex_unlock(&vcpu->kvm->lock);
	return 0;
}

static void clear_floating_interrupts(struct kvm *kvm)
{
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info	*n, *inti = NULL;

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	list_for_each_entry_safe(inti, n, &fi->list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
	fi->irq_count = 0;
	atomic_set(&fi->active, 0);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
}

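/*
 * Translate an in-kernel interrupt into the kvm_s390_irq layout that
 * KVM_DEV_FLIC_GET_ALL_IRQS hands to userspace.
 */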
static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
				   u8 *addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	struct kvm_s390_irq irq = {0};

	irq.type = inti->type;
	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		irq.u.ext = inti->ext;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		irq.u.io = inti->io;
		break;
	case KVM_S390_MCHK:
		irq.u.mchk = inti->mchk;
		break;
	default:
		return -EINVAL;
	}

	if (copy_to_user(uptr, &irq, sizeof(irq)))
		return -EFAULT;

	return 0;
}

static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
{
	struct kvm_s390_interrupt_info *inti;
	struct kvm_s390_float_interrupt *fi;
	int ret = 0;
	int n = 0;

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);

	list_for_each_entry(inti, &fi->list, list) {
		if (len < sizeof(struct kvm_s390_irq)) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			break;
		}
		ret = copy_irq_to_user(inti, buf);
		if (ret)
			break;
		buf += sizeof(struct kvm_s390_irq);
		len -= sizeof(struct kvm_s390_irq);
		n++;
	}

	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);

	return ret < 0 ? ret : n;
}

static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
		r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
					  attr->attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
				     u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	void *target = NULL;
	void __user *source;
	u64 size;

	if (get_user(inti->type, (u64 __user *)addr))
		return -EFAULT;

	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		target = (void *) &inti->ext;
		source = &uptr->u.ext;
		size = sizeof(inti->ext);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		target = (void *) &inti->io;
		source = &uptr->u.io;
		size = sizeof(inti->io);
		break;
	case KVM_S390_MCHK:
		target = (void *) &inti->mchk;
		source = &uptr->u.mchk;
		size = sizeof(inti->mchk);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(target, source, size))
		return -EFAULT;

	return 0;
}

static int enqueue_floating_irq(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int r = 0;
	int len = attr->attr;

	if (len % sizeof(struct kvm_s390_irq) != 0)
		return -EINVAL;
	else if (len > KVM_S390_FLIC_MAX_BUFFER)
		return -EINVAL;

	while (len >= sizeof(struct kvm_s390_irq)) {
		inti = kzalloc(sizeof(*inti), GFP_KERNEL);
		if (!inti)
			return -ENOMEM;

		r = copy_irq_from_user(inti, attr->addr);
		if (r) {
			kfree(inti);
			return r;
		}
		r = __inject_vm(dev->kvm, inti);
		if (r) {
			kfree(inti);
			return r;
		}
		len -= sizeof(struct kvm_s390_irq);
		attr->addr += sizeof(struct kvm_s390_irq);
	}

	return r;
}

static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
	if (id >= MAX_S390_IO_ADAPTERS)
		return NULL;
	return kvm->arch.adapters[id];
}

static int register_io_adapter(struct kvm_device *dev,
			       struct kvm_device_attr *attr)
{
	struct s390_io_adapter *adapter;
	struct kvm_s390_io_adapter adapter_info;

	if (copy_from_user(&adapter_info,
			   (void __user *)attr->addr, sizeof(adapter_info)))
		return -EFAULT;

	if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
	    (dev->kvm->arch.adapters[adapter_info.id] != NULL))
		return -EINVAL;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	INIT_LIST_HEAD(&adapter->maps);
	init_rwsem(&adapter->maps_lock);
	atomic_set(&adapter->nr_maps, 0);
	adapter->id = adapter_info.id;
	adapter->isc = adapter_info.isc;
	adapter->maskable = adapter_info.maskable;
	adapter->masked = false;
	adapter->swap = adapter_info.swap;
	dev->kvm->arch.adapters[adapter->id] = adapter;

	return 0;
}

int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
	int ret;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter || !adapter->maskable)
		return -EINVAL;
	ret = adapter->masked;
	adapter->masked = masked;
	return ret;
}

static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map;
	int ret;

	if (!adapter || !addr)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&map->list);
	map->guest_addr = addr;
	map->addr = gmap_translate(addr, kvm->arch.gmap);
	if (map->addr == -EFAULT) {
		ret = -EFAULT;
		goto out;
	}
	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
	if (ret < 0)
		goto out;
	BUG_ON(ret != 1);
	down_write(&adapter->maps_lock);
	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
		list_add_tail(&map->list, &adapter->maps);
		ret = 0;
	} else {
		put_page(map->page);
		ret = -EINVAL;
	}
	up_write(&adapter->maps_lock);
out:
	if (ret)
		kfree(map);
	return ret;
}

static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map, *tmp;
	int found = 0;

	if (!adapter || !addr)
		return -EINVAL;

	down_write(&adapter->maps_lock);
	list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
		if (map->guest_addr == addr) {
			found = 1;
			atomic_dec(&adapter->nr_maps);
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
			break;
		}
	}
	up_write(&adapter->maps_lock);

	return found ? 0 : -EINVAL;
}

void kvm_s390_destroy_adapters(struct kvm *kvm)
{
	int i;
	struct s390_map_info *map, *tmp;

	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
		if (!kvm->arch.adapters[i])
			continue;
		list_for_each_entry_safe(map, tmp,
					 &kvm->arch.adapters[i]->maps, list) {
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
		}
		kfree(kvm->arch.adapters[i]);
	}
}

static int modify_io_adapter(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct kvm_s390_io_adapter_req req;
	struct s390_io_adapter *adapter;
	int ret;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	adapter = get_io_adapter(dev->kvm, req.id);
	if (!adapter)
		return -EINVAL;
	switch (req.type) {
	case KVM_S390_IO_ADAPTER_MASK:
		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
		if (ret > 0)
			ret = 0;
		break;
	case KVM_S390_IO_ADAPTER_MAP:
		ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
		break;
	case KVM_S390_IO_ADAPTER_UNMAP:
		ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;
	unsigned int i;
	struct kvm_vcpu *vcpu;

	switch (attr->group) {
	case KVM_DEV_FLIC_ENQUEUE:
		r = enqueue_floating_irq(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IRQS:
		r = 0;
		clear_floating_interrupts(dev->kvm);
		break;
	case KVM_DEV_FLIC_APF_ENABLE:
		dev->kvm->arch.gmap->pfault_enabled = 1;
		break;
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
		dev->kvm->arch.gmap->pfault_enabled = 0;
		/*
		 * Make sure no async faults are in transition when
		 * clearing the queues. So we don't need to worry
		 * about late coming workers.
		 */
		synchronize_srcu(&dev->kvm->srcu);
		kvm_for_each_vcpu(i, vcpu, dev->kvm)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
		r = register_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
		r = modify_io_adapter(dev, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static int flic_create(struct kvm_device *dev, u32 type)
{
	if (!dev)
		return -EINVAL;
	if (dev->kvm->arch.flic)
		return -EINVAL;
	dev->kvm->arch.flic = dev;
	return 0;
}

static void flic_destroy(struct kvm_device *dev)
{
	dev->kvm->arch.flic = NULL;
	kfree(dev);
}

/* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,
	.set_attr = flic_set_attr,
	.create = flic_create,
	.destroy = flic_destroy,
};

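/*
 * Compute the bit number of an indicator bit within its page.  With
 * "swap" set the number is XOR-ed with BITS_PER_LONG - 1, converting
 * between MSB-first and LSB-first bit numbering for the host bitops.
 */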
static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
{
	unsigned long bit;

	bit = bit_nr + (addr % PAGE_SIZE) * 8;

	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}

static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
					  u64 addr)
{
	struct s390_map_info *map;

	if (!adapter)
		return NULL;

	list_for_each_entry(map, &adapter->maps, list) {
		if (map->guest_addr == addr)
			return map;
	}
	return NULL;
}

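/*
 * Set the adapter interruption indicator bit and the matching summary
 * indicator bit, marking the touched pages dirty.  Returns 1 if the
 * summary bit was newly set (an interrupt should be injected), 0 if it
 * was already pending, or -1 if a mapping is unknown.
 */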
static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct s390_map_info *info;
	void *map;

	info = get_map_info(adapter, adapter_int->ind_addr);
	if (!info)
		return -1;
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	idx = srcu_read_lock(&kvm->srcu);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	info = get_map_info(adapter, adapter_int->summary_addr);
	if (!info) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -1;
	}
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
			  adapter->swap);
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	srcu_read_unlock(&kvm->srcu, idx);
	return summary_set ? 0 : 1;
}

/*
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	int ret;
	struct s390_io_adapter *adapter;

	/* We're only interested in the 0->1 transition. */
	if (!level)
		return 0;
	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
	if (!adapter)
		return -1;
	down_read(&adapter->maps_lock);
	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
	up_read(&adapter->maps_lock);
	if ((ret > 0) && !adapter->masked) {
		struct kvm_s390_interrupt s390int = {
			.type = KVM_S390_INT_IO(1, 0, 0, 0),
			.parm = 0,
			.parm64 = (adapter->isc << 27) | 0x80000000,
		};
		ret = kvm_s390_inject_vm(kvm, &s390int);
		if (ret == 0)
			ret = 1;
	}
	return ret;
}

int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int ret;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_S390_ADAPTER:
		e->set = set_adapter_int;
		e->adapter.summary_addr = ue->u.adapter.summary_addr;
		e->adapter.ind_addr = ue->u.adapter.ind_addr;
		e->adapter.summary_offset = ue->u.adapter.summary_offset;
		e->adapter.ind_offset = ue->u.adapter.ind_offset;
		e->adapter.adapter_id = ue->u.adapter.adapter_id;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	return -EINVAL;
}