/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2015
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/dis.h>
#include <asm/uaccess.h>
#include <asm/sclp.h>
#include <asm/isc.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define IOINT_SCHID_MASK 0x0000ffff
#define IOINT_SSID_MASK 0x00030000
#define IOINT_CSSID_MASK 0x03fc0000
#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00

/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
	int c, scn;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
		return 0;

	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (src_id)
		*src_id = scn;

	return c;
}

static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
	int expect, rc;

	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (rc != expect) {
		/* another external call is pending */
		return -EBUSY;
	}
	atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
	return 0;
}

static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc, expect;

	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
	WARN_ON(rc != expect); /* cannot clear? */
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	return psw_extint_disabled(vcpu) &&
	       psw_ioint_disabled(vcpu) &&
	       psw_mchk_disabled(vcpu);
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	if (psw_extint_disabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
		/* No timer interrupts when single stepping */
		return 0;
	return 1;
}

static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
		return 0;
	return ckc_interrupts_enabled(vcpu);
}

static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	return !psw_extint_disabled(vcpu) &&
	       (vcpu->arch.sie_block->gcr[0] & 0x400ul);
}

static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.sie_block->cputm >> 63) &&
	       cpu_timer_interrupts_enabled(vcpu);
}

static inline int is_ioirq(unsigned long irq_type)
{
	return ((irq_type >= IRQ_PEND_IO_ISC_0) &&
		(irq_type <= IRQ_PEND_IO_ISC_7));
}

static uint64_t isc_to_isc_bits(int isc)
{
	return (0x80 >> isc) << 24;
}

static inline u8 int_word_to_isc(u32 int_word)
{
	return (int_word & 0x38000000) >> 27;
}

static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.float_int.pending_irqs |
	       vcpu->arch.local_int.pending_irqs;
}

static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
				  unsigned long active_mask)
{
	int i;

	for (i = 0; i <= MAX_ISC; i++)
		if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
			active_mask &= ~(1UL << (IRQ_PEND_IO_ISC_0 + i));

	return active_mask;
}

static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
	unsigned long active_mask;

	active_mask = pending_irqs(vcpu);
	if (!active_mask)
		return 0;

	if (psw_extint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_EXT_MASK;
	if (psw_ioint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_IO_MASK;
	else
		active_mask = disable_iscs(vcpu, active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		__clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
	if (psw_mchk_disabled(vcpu))
		active_mask &= ~IRQ_PEND_MCHK_MASK;
	if (!(vcpu->arch.sie_block->gcr[14] &
	      vcpu->kvm->arch.float_int.mchk.cr14))
		__clear_bit(IRQ_PEND_MCHK_REP, &active_mask);

	/*
	 * STOP irqs will never be actively delivered. They are triggered via
	 * intercept requests and cleared when the stop intercept is performed.
	 */
	__clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

	return active_mask;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
		      &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
}

static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK))
		return;
	else if (psw_ioint_disabled(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_IO_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR6;
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK))
		return;
	if (psw_extint_disabled(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
		return;
	if (psw_mchk_disabled(vcpu))
		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
	if (kvm_s390_is_stop_irq_pending(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
	set_intercept_indicators_io(vcpu);
	set_intercept_indicators_ext(vcpu);
	set_intercept_indicators_mchk(vcpu);
	set_intercept_indicators_stop(vcpu);
}

static u16 get_ilc(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_INST:
	case ICPT_INSTPROGI:
	case ICPT_OPEREXC:
	case ICPT_PARTEXEC:
	case ICPT_IOINST:
		/* last instruction only stored for these icptcodes */
		return insn_length(vcpu->arch.sie_block->ipa >> 8);
	case ICPT_PROGI:
		return vcpu->arch.sie_block->pgmilc;
	default:
		return 0;
	}
}

static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
					 0, 0);

	rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
			  (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
					 0, 0);

	rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
			  (u16 __user *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_ext_info ext;
	int rc;

	spin_lock(&li->lock);
	ext = li->irq.ext;
	clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	li->irq.ext.ext_params2 = 0;
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
		   ext.ext_params2);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_INIT,
					 0, ext.ext_params2);

	rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info mchk = {};
	unsigned long adtl_status_addr;
	int deliver = 0;
	int rc = 0;

	spin_lock(&fi->lock);
	spin_lock(&li->lock);
	if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
	    test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
		/*
		 * If there was an exigent machine check pending, then any
		 * repressible machine checks that might have been pending
		 * are indicated along with it, so always clear bits for
		 * repressible and exigent interrupts
		 */
		mchk = li->irq.mchk;
		clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
		clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
		memset(&li->irq.mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	/*
	 * We indicate floating repressible conditions along with
	 * other pending conditions. Channel Report Pending and Channel
	 * Subsystem damage are the only two and are indicated by
	 * bits in mcic and masked in cr14.
	 */
	if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		mchk.mcic |= fi->mchk.mcic;
		mchk.cr14 |= fi->mchk.cr14;
		memset(&fi->mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	spin_unlock(&li->lock);
	spin_unlock(&fi->lock);

	if (deliver) {
		VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
			   mchk.mcic);
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_MCHK,
						 mchk.cr14, mchk.mcic);

		rc = kvm_s390_vcpu_store_status(vcpu,
						KVM_S390_STORE_STATUS_PREFIXED);
		rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR,
				    &adtl_status_addr,
				    sizeof(unsigned long));
		rc |= kvm_s390_vcpu_store_adtl_status(vcpu,
						      adtl_status_addr);
		rc |= put_guest_lc(vcpu, mchk.mcic,
				   (u64 __user *) __LC_MCCK_CODE);
		rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
				   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
		rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
				     &mchk.fixed_logout,
				     sizeof(mchk.fixed_logout));
		rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
	}
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
	vcpu->stat.deliver_restart_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	rc = write_guest_lc(vcpu,
			    offsetof(struct lowcore, restart_old_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info prefix;

	spin_lock(&li->lock);
	prefix = li->irq.prefix;
	li->irq.prefix.address = 0;
	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	spin_unlock(&li->lock);

	vcpu->stat.deliver_prefix_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_SIGP_SET_PREFIX,
					 prefix.address, 0);

	kvm_s390_set_prefix(vcpu, prefix.address);
	return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;
	int cpu_addr;

	spin_lock(&li->lock);
	cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	clear_bit(cpu_addr, li->sigp_emerg_pending);
	if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
		clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
	vcpu->stat.deliver_emergency_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
					 cpu_addr, 0);

	rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
			  (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info extcall;
	int rc;

	spin_lock(&li->lock);
	extcall = li->irq.extcall;
	li->irq.extcall.code = 0;
	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
	vcpu->stat.deliver_external_call++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_EXTERNAL_CALL,
					 extcall.code, 0);

	rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
			  (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_pgm_info pgm_info;
	int rc = 0, nullifying = false;
	u16 ilc = get_ilc(vcpu);

	spin_lock(&li->lock);
	pgm_info = li->irq.pgm;
	clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
	memset(&li->irq.pgm, 0, sizeof(pgm_info));
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilc:%d",
		   pgm_info.code, ilc);
	vcpu->stat.deliver_program_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
					 pgm_info.code, 0);

	switch (pgm_info.code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
		nullifying = true;
		/* fall through */
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
				  (u16 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_STACK_FULL:
	case PGM_STACK_EMPTY:
	case PGM_STACK_SPECIFICATION:
	case PGM_STACK_TYPE:
	case PGM_STACK_OPERATION:
	case PGM_TRACE_TABEL:
	case PGM_CRYPTO_OPERATION:
		nullifying = true;
		break;
	}

	if (pgm_info.code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info.per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info.per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST)
		kvm_s390_rewind_psw(vcpu, ilc);

	rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
			   (u64 *) __LC_LAST_BREAK);
	rc |= put_guest_lc(vcpu, pgm_info.code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_ext_info ext;
	int rc = 0;

	spin_lock(&fi->lock);
	if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
		spin_unlock(&fi->lock);
		return 0;
	}
	ext = fi->srv_signal;
	memset(&fi->srv_signal, 0, sizeof(ext));
	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
		   ext.ext_params);
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
					 ext.ext_params, 0);

	rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);

	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_PFAULT] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
		clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_INT_PFAULT_DONE, 0,
						 inti->ext.ext_params2);
		VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
			   inti->ext.ext_params2);

		rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				  (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, PFAULT_DONE,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		VCPU_EVENT(vcpu, 4,
			   "deliver: virtio parm: 0x%x,parm64: 0x%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 inti->type,
						 inti->ext.ext_params,
						 inti->ext.ext_params2);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
		clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				  (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
				     unsigned long irq_type)
{
	struct list_head *isc_list;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti = NULL;
	int rc = 0;

	fi = &vcpu->kvm->arch.float_int;

	spin_lock(&fi->lock);
	isc_list = &fi->lists[irq_type - IRQ_PEND_IO_ISC_0];
	inti = list_first_entry_or_null(isc_list,
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		VCPU_EVENT(vcpu, 4, "deliver: I/O 0x%llx", inti->type);
		vcpu->stat.deliver_io_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
				inti->type,
				((__u32)inti->io.subchannel_id << 16) |
				inti->io.subchannel_nr,
				((__u64)inti->io.io_int_parm << 32) |
				inti->io.io_int_word);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
	}
	if (list_empty(isc_list))
		clear_bit(irq_type, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc = put_guest_lc(vcpu, inti->io.subchannel_id,
				  (u16 *)__LC_SUBCHANNEL_ID);
		rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
				   (u16 *)__LC_SUBCHANNEL_NR);
		rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
				   (u32 *)__LC_IO_INT_PARM);
		rc |= put_guest_lc(vcpu, inti->io.io_int_word,
				   (u32 *)__LC_IO_INT_WORD);
		rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		kfree(inti);
	}

	return rc ? -EFAULT : 0;
}

typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);

static const deliver_irq_t deliver_irq_funcs[] = {
	[IRQ_PEND_MCHK_EX] = __deliver_machine_check,
	[IRQ_PEND_MCHK_REP] = __deliver_machine_check,
	[IRQ_PEND_PROG] = __deliver_prog,
	[IRQ_PEND_EXT_EMERGENCY] = __deliver_emergency_signal,
	[IRQ_PEND_EXT_EXTERNAL] = __deliver_external_call,
	[IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
	[IRQ_PEND_EXT_CPU_TIMER] = __deliver_cpu_timer,
	[IRQ_PEND_RESTART] = __deliver_restart,
	[IRQ_PEND_SET_PREFIX] = __deliver_set_prefix,
	[IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init,
	[IRQ_PEND_EXT_SERVICE] = __deliver_service,
	[IRQ_PEND_PFAULT_DONE] = __deliver_pfault_done,
	[IRQ_PEND_VIRTIO] = __deliver_virtio,
};

/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	if (!sclp.has_sigpif)
		return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

	return sca_ext_call_pending(vcpu, NULL);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
	if (deliverable_irqs(vcpu))
		return 1;

	if (kvm_cpu_has_pending_timer(vcpu))
		return 1;

	/* external call pending and deliverable */
	if (kvm_s390_ext_call_pending(vcpu) &&
	    !psw_extint_disabled(vcpu) &&
	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		return 1;

	if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
		return 1;
	return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}

int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;

	vcpu->stat.exit_wait_state++;

	/* fast path */
	if (kvm_arch_vcpu_runnable(vcpu))
		return 0;

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (!ckc_interrupts_enabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		__set_cpu_idle(vcpu);
		goto no_timer;
	}

	now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

	/* underflow */
	if (vcpu->arch.sie_block->ckc < now)
		return 0;

	__set_cpu_idle(vcpu);
	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 4, "enabled wait via clock comparator: %llu ns", sltime);
no_timer:
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	kvm_vcpu_block(vcpu);
	__unset_cpu_idle(vcpu);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	hrtimer_cancel(&vcpu->arch.ckc_timer);
	return 0;
}

void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
	if (waitqueue_active(&vcpu->wq)) {
		/*
		 * The vcpu gave up the cpu voluntarily, mark it as a good
		 * yield-candidate.
		 */
		vcpu->preempted = true;
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;
	u64 now, sltime;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

	/*
	 * If the monotonic clock runs faster than the tod clock we might be
	 * woken up too early and have to go back to sleep to avoid deadlocks.
	 */
	if (vcpu->arch.sie_block->ckc > now &&
	    hrtimer_forward_now(timer, ns_to_ktime(sltime)))
		return HRTIMER_RESTART;
	kvm_s390_vcpu_wakeup(vcpu);
	return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->pending_irqs = 0;
	bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	memset(&li->irq, 0, sizeof(li->irq));
	spin_unlock(&li->lock);

	sca_clear_ext_call(vcpu);
}

int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	deliver_irq_t func;
	int rc = 0;
	unsigned long irq_type;
	unsigned long irqs;

	__reset_intercept_indicators(vcpu);

	/* pending ckc conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	if (ckc_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

	/* pending cpu timer conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	if (cpu_timer_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);

	while ((irqs = deliverable_irqs(vcpu)) && !rc) {
		/* bits are in the order of interrupt priority */
		irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
		if (is_ioirq(irq_type)) {
			rc = __deliver_io(vcpu, irq_type);
		} else {
			func = deliver_irq_funcs[irq_type];
			if (!func) {
				WARN_ON_ONCE(func == NULL);
				clear_bit(irq_type, &li->pending_irqs);
				continue;
			}
			rc = func(vcpu);
		}
	}

	set_intercept_indicators(vcpu);

	return rc;
}

static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
				   irq->u.pgm.code, 0);

	if (irq->u.pgm.code == PGM_PER) {
		li->irq.pgm.code |= PGM_PER;
		/* only modify PER related information */
		li->irq.pgm.per_address = irq->u.pgm.per_address;
		li->irq.pgm.per_code = irq->u.pgm.per_code;
		li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
		li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
	} else if (!(irq->u.pgm.code & PGM_PER)) {
		li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
				   irq->u.pgm.code;
		/* only modify non-PER information */
		li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
		li->irq.pgm.mon_code = irq->u.pgm.mon_code;
		li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
		li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
		li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
		li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
	} else {
		li->irq.pgm = irq->u.pgm;
	}
	set_bit(IRQ_PEND_PROG, &li->pending_irqs);
	return 0;
}

static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
		   irq->u.ext.ext_params2);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
				   irq->u.ext.ext_params,
				   irq->u.ext.ext_params2);

	li->irq.ext = irq->u.ext;
	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
	uint16_t src_id = irq->u.extcall.code;

	VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
		   src_id);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
				   src_id, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
		return -EINVAL;

	if (sclp.has_sigpif)
		return sca_inject_ext_call(vcpu, src_id);

	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
		return -EBUSY;
	*extcall = irq->u.extcall;
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
		   irq->u.prefix.address);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
				   irq->u.prefix.address, 0);

	if (!is_vcpu_stopped(vcpu))
		return -EBUSY;

	*prefix = irq->u.prefix;
	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	return 0;
}

#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_stop_info *stop = &li->irq.stop;
	int rc = 0;

	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);

	if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
		return -EINVAL;

	if (is_vcpu_stopped(vcpu)) {
		if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
			rc = kvm_s390_store_status_unloaded(vcpu,
					KVM_S390_STORE_STATUS_NOADDR);
		return rc;
	}

	if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
		return -EBUSY;
	stop->flags = irq->u.stop.flags;
	__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
	return 0;
}

static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
				 struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return 0;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
		   irq->u.emerg.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
				   irq->u.emerg.code, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
		return -EINVAL;

	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

	VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
		   irq->u.mchk.mcic);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
				   irq->u.mchk.mcic);

	/*
	 * Because repressible machine checks can be indicated along with
	 * exigent machine checks (PoP, Chapter 11, Interruption action)
	 * we need to combine cr14, mcic and external damage code.
	 * Failing storage address and the logout area should not be or'ed
	 * together, we just indicate the last occurrence of the corresponding
	 * machine check
	 */
	mchk->cr14 |= irq->u.mchk.cr14;
	mchk->mcic |= irq->u.mchk.mcic;
	mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
	mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
	memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
	       sizeof(mchk->fixed_logout));
	if (mchk->mcic & MCHK_EX_MASK)
		set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
	else if (mchk->mcic & MCHK_REP_MASK)
		set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
	return 0;
}

static int __inject_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
						  int isc, u32 schid)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	struct kvm_s390_interrupt_info *iter;
	u16 id = (schid & 0xffff0000U) >> 16;
	u16 nr = schid & 0x0000ffffU;

	spin_lock(&fi->lock);
	list_for_each_entry(iter, isc_list, list) {
		if (schid && (id != iter->io.subchannel_id ||
			      nr != iter->io.subchannel_nr))
			continue;
		/* found an appropriate entry */
		list_del_init(&iter->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
		if (list_empty(isc_list))
			clear_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
		spin_unlock(&fi->lock);
		return iter;
	}
	spin_unlock(&fi->lock);
	return NULL;
}

/*
 * Dequeue and return an I/O interrupt matching any of the interruption
 * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
 */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 isc_mask, u32 schid)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int isc;

	for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
		if (isc_mask & isc_to_isc_bits(isc))
			inti = get_io_int(kvm, isc, schid);
	}
	return inti;
}

#define SCCB_MASK 0xFFFFFFF8
#define SCCB_EVENT_PENDING 0x3

static int __inject_service(struct kvm *kvm,
			    struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
	/*
	 * Early versions of the QEMU s390 bios will inject several
	 * service interrupts one after the other without handling a
	 * condition code indicating busy.
	 * We will silently ignore those superfluous sccb values.
	 * A future version of QEMU will take care of serialization
	 * of servc requests
	 */
	if (fi->srv_signal.ext_params & SCCB_MASK)
		goto out;
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
	set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
out:
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}

static int __inject_virtio(struct kvm *kvm,
			   struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_VIRTIO] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
	set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

static int __inject_pfault_done(struct kvm *kvm,
				struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_PFAULT] >=
		(ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_PFAULT] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
	set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

#define CR_PENDING_SUBCLASS 28
static int __inject_float_mchk(struct kvm *kvm,
			       struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
	fi->mchk.mcic |= inti->mchk.mcic;
	set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}

static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi;
	struct list_head *list;
	int isc;

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_IO] += 1;

	isc = int_word_to_isc(inti->io.io_int_word);
	list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	list_add_tail(&inti->list, list);
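	/*
	 * Flag this ISC as pending so that pending_irqs()/deliverable_irqs()
	 * on the target VCPU will see the queued floating I/O interrupt.
	 */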
	set_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

/*
 * Find a destination VCPU for a floating irq and kick it.
 */
static void __floating_irq_kick(struct kvm *kvm, u64 type)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu;
	int sigcpu, online_vcpus, nr_tries = 0;

	online_vcpus = atomic_read(&kvm->online_vcpus);
	if (!online_vcpus)
		return;

	/* find idle VCPUs first, then round robin */
	sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
	if (sigcpu == online_vcpus) {
		do {
			sigcpu = fi->next_rr_cpu;
			fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
			/* avoid endless loops if all vcpus are stopped */
			if (nr_tries++ >= online_vcpus)
				return;
		} while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);

	/* make the VCPU drop out of the SIE, or wake it up if sleeping */
	li = &dst_vcpu->arch.local_int;
	spin_lock(&li->lock);
	switch (type) {
	case KVM_S390_MCHK:
		atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		atomic_or(CPUSTAT_IO_INT, li->cpuflags);
		break;
	default:
		atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
		break;
	}
	spin_unlock(&li->lock);
	kvm_s390_vcpu_wakeup(dst_vcpu);
}

static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	u64 type = READ_ONCE(inti->type);
	int rc;

	switch (type) {
	case KVM_S390_MCHK:
		rc = __inject_float_mchk(kvm, inti);
		break;
	case KVM_S390_INT_VIRTIO:
		rc = __inject_virtio(kvm, inti);
		break;
	case KVM_S390_INT_SERVICE:
		rc = __inject_service(kvm, inti);
		break;
	case KVM_S390_INT_PFAULT_DONE:
		rc = __inject_pfault_done(kvm, inti);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		rc = __inject_io(kvm, inti);
		break;
	default:
		rc = -EINVAL;
	}
	if (rc)
		return rc;

	__floating_irq_kick(kvm, type);
	return 0;
}

int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_interrupt_info *inti;
	int rc;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = s390int->type;
	switch (inti->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_INT_PFAULT_DONE:
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_MCHK:
		VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
			 s390int->parm64);
		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (inti->type & KVM_S390_INT_IO_AI_MASK)
			VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
		else
			VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
				 s390int->type & IOINT_CSSID_MASK,
				 s390int->type & IOINT_SSID_MASK,
				 s390int->type & IOINT_SCHID_MASK);
		inti->io.subchannel_id = s390int->parm >> 16;
		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
		inti->io.io_int_parm = s390int->parm64 >> 32;
		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
		break;
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);

	rc = __inject_vm(kvm, inti);
	if (rc)
		kfree(inti);
	return rc;
}

int kvm_s390_reinject_io_int(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti)
{
	return __inject_vm(kvm, inti);
}

int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
		       struct kvm_s390_irq *irq)
{
	irq->type = s390int->type;
	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.pgm.code = s390int->parm;
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		irq->u.prefix.address = s390int->parm;
		break;
	case KVM_S390_SIGP_STOP:
		irq->u.stop.flags = s390int->parm;
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.extcall.code = s390int->parm;
		break;
	case KVM_S390_INT_EMERGENCY:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.emerg.code = s390int->parm;
		break;
	case KVM_S390_MCHK:
		irq->u.mchk.mcic = s390int->parm64;
		break;
	}
	return 0;
}

int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
}

void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->irq.stop.flags = 0;
	clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
	spin_unlock(&li->lock);
}

static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	int rc;

	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		rc = __inject_prog(vcpu, irq);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		rc = __inject_set_prefix(vcpu, irq);
		break;
	case KVM_S390_SIGP_STOP:
		rc = __inject_sigp_stop(vcpu, irq);
		break;
	case KVM_S390_RESTART:
		rc = __inject_sigp_restart(vcpu, irq);
		break;
	case KVM_S390_INT_CLOCK_COMP:
		rc = __inject_ckc(vcpu);
		break;
	case KVM_S390_INT_CPU_TIMER:
		rc = __inject_cpu_timer(vcpu);
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		rc = __inject_extcall(vcpu, irq);
		break;
	case KVM_S390_INT_EMERGENCY:
		rc = __inject_sigp_emergency(vcpu, irq);
		break;
	case KVM_S390_MCHK:
		rc = __inject_mchk(vcpu, irq);
		break;
	case KVM_S390_INT_PFAULT_INIT:
		rc = __inject_pfault_init(vcpu, irq);
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	default:
		rc = -EINVAL;
	}

	return rc;
}

int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	spin_lock(&li->lock);
	rc = do_inject_vcpu(vcpu, irq);
	spin_unlock(&li->lock);
	if (!rc)
		kvm_s390_vcpu_wakeup(vcpu);
	return rc;
}

static inline void clear_irq_list(struct list_head *_list)
{
	struct kvm_s390_interrupt_info *inti, *n;

	list_for_each_entry_safe(inti, n, _list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
}

static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
			struct kvm_s390_irq *irq)
{
	irq->type = inti->type;
	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		irq->u.ext = inti->ext;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		irq->u.io = inti->io;
		break;
	}
}

void kvm_s390_clear_float_irqs(struct kvm *kvm)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	int i;

	spin_lock(&fi->lock);
	fi->pending_irqs = 0;
	memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
	memset(&fi->mchk, 0, sizeof(fi->mchk));
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		clear_irq_list(&fi->lists[i]);
	for (i = 0; i < FIRQ_MAX_COUNT; i++)
		fi->counters[i] = 0;
	spin_unlock(&fi->lock);
};

static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
{
	struct kvm_s390_interrupt_info *inti;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_irq *buf;
	struct kvm_s390_irq *irq;
	int max_irqs;
	int ret = 0;
	int n = 0;
	int i;

	if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
		return -EINVAL;

	/*
	 * We are already using -ENOMEM to signal
	 * userspace it may retry with a bigger buffer,
	 * so we need to use something else for this case
	 */
	buf = vzalloc(len);
	if (!buf)
		return -ENOBUFS;

	max_irqs = len / sizeof(struct kvm_s390_irq);

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++) {
		list_for_each_entry(inti, &fi->lists[i], list) {
			if (n == max_irqs) {
				/* signal userspace to try again */
				ret = -ENOMEM;
				goto out;
			}
			inti_to_irq(inti, &buf[n]);
			n++;
		}
	}
	if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_INT_SERVICE;
		irq->u.ext = fi->srv_signal;
		n++;
	}
	if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = fi->mchk;
		n++;
	}

out:
	spin_unlock(&fi->lock);
	if (!ret && n > 0) {
		if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
			ret = -EFAULT;
	}
	vfree(buf);

	return ret < 0 ? ret : n;
}

static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
		r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
					  attr->attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
				     u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	void *target = NULL;
	void __user *source;
	u64 size;

	if (get_user(inti->type, (u64 __user *)addr))
		return -EFAULT;

	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		target = (void *) &inti->ext;
		source = &uptr->u.ext;
		size = sizeof(inti->ext);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		target = (void *) &inti->io;
		source = &uptr->u.io;
		size = sizeof(inti->io);
		break;
	case KVM_S390_MCHK:
		target = (void *) &inti->mchk;
		source = &uptr->u.mchk;
		size = sizeof(inti->mchk);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(target, source, size))
		return -EFAULT;

	return 0;
}

static int enqueue_floating_irq(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int r = 0;
	int len = attr->attr;

	if (len % sizeof(struct kvm_s390_irq) != 0)
		return -EINVAL;
	else if (len > KVM_S390_FLIC_MAX_BUFFER)
		return -EINVAL;

	while (len >= sizeof(struct kvm_s390_irq)) {
		inti = kzalloc(sizeof(*inti), GFP_KERNEL);
		if (!inti)
			return -ENOMEM;

		r = copy_irq_from_user(inti, attr->addr);
		if (r) {
			kfree(inti);
			return r;
		}
		r = __inject_vm(dev->kvm, inti);
		if (r) {
			kfree(inti);
			return r;
		}
		len -= sizeof(struct kvm_s390_irq);
		attr->addr += sizeof(struct kvm_s390_irq);
	}

	return r;
}

static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
	if (id >= MAX_S390_IO_ADAPTERS)
		return NULL;
	return kvm->arch.adapters[id];
}

static int register_io_adapter(struct kvm_device *dev,
			       struct kvm_device_attr *attr)
{
	struct s390_io_adapter *adapter;
	struct kvm_s390_io_adapter adapter_info;

	if (copy_from_user(&adapter_info,
			   (void __user *)attr->addr, sizeof(adapter_info)))
		return -EFAULT;

	if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
	    (dev->kvm->arch.adapters[adapter_info.id] != NULL))
		return -EINVAL;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	INIT_LIST_HEAD(&adapter->maps);
	init_rwsem(&adapter->maps_lock);
	atomic_set(&adapter->nr_maps, 0);
	adapter->id = adapter_info.id;
	adapter->isc = adapter_info.isc;
	adapter->maskable = adapter_info.maskable;
	adapter->masked = false;
	adapter->swap = adapter_info.swap;
	dev->kvm->arch.adapters[adapter->id] = adapter;

	return 0;
}

int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
	int ret;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter || !adapter->maskable)
		return -EINVAL;
static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
	if (id >= MAX_S390_IO_ADAPTERS)
		return NULL;
	return kvm->arch.adapters[id];
}

static int register_io_adapter(struct kvm_device *dev,
			       struct kvm_device_attr *attr)
{
	struct s390_io_adapter *adapter;
	struct kvm_s390_io_adapter adapter_info;

	if (copy_from_user(&adapter_info,
			   (void __user *)attr->addr, sizeof(adapter_info)))
		return -EFAULT;

	if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
	    (dev->kvm->arch.adapters[adapter_info.id] != NULL))
		return -EINVAL;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	INIT_LIST_HEAD(&adapter->maps);
	init_rwsem(&adapter->maps_lock);
	atomic_set(&adapter->nr_maps, 0);
	adapter->id = adapter_info.id;
	adapter->isc = adapter_info.isc;
	adapter->maskable = adapter_info.maskable;
	adapter->masked = false;
	adapter->swap = adapter_info.swap;
	dev->kvm->arch.adapters[adapter->id] = adapter;

	return 0;
}

int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
	int ret;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter || !adapter->maskable)
		return -EINVAL;
	ret = adapter->masked;
	adapter->masked = masked;
	return ret;
}

static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map;
	int ret;

	if (!adapter || !addr)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&map->list);
	map->guest_addr = addr;
	map->addr = gmap_translate(kvm->arch.gmap, addr);
	if (map->addr == -EFAULT) {
		ret = -EFAULT;
		goto out;
	}
	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
	if (ret < 0)
		goto out;
	BUG_ON(ret != 1);
	down_write(&adapter->maps_lock);
	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
		list_add_tail(&map->list, &adapter->maps);
		ret = 0;
	} else {
		put_page(map->page);
		ret = -EINVAL;
	}
	up_write(&adapter->maps_lock);
out:
	if (ret)
		kfree(map);
	return ret;
}

static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map, *tmp;
	int found = 0;

	if (!adapter || !addr)
		return -EINVAL;

	down_write(&adapter->maps_lock);
	list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
		if (map->guest_addr == addr) {
			found = 1;
			atomic_dec(&adapter->nr_maps);
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
			break;
		}
	}
	up_write(&adapter->maps_lock);

	return found ? 0 : -EINVAL;
}

void kvm_s390_destroy_adapters(struct kvm *kvm)
{
	int i;
	struct s390_map_info *map, *tmp;

	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
		if (!kvm->arch.adapters[i])
			continue;
		list_for_each_entry_safe(map, tmp,
					 &kvm->arch.adapters[i]->maps, list) {
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
		}
		kfree(kvm->arch.adapters[i]);
	}
}

static int modify_io_adapter(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct kvm_s390_io_adapter_req req;
	struct s390_io_adapter *adapter;
	int ret;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	adapter = get_io_adapter(dev->kvm, req.id);
	if (!adapter)
		return -EINVAL;
	switch (req.type) {
	case KVM_S390_IO_ADAPTER_MASK:
		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
		if (ret > 0)
			ret = 0;
		break;
	case KVM_S390_IO_ADAPTER_MAP:
		ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
		break;
	case KVM_S390_IO_ADAPTER_UNMAP:
		ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
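/*
 * Usage note (userspace sketch): an I/O adapter is registered once via
 * KVM_DEV_FLIC_ADAPTER_REGISTER and its indicator pages are then pinned
 * with KVM_DEV_FLIC_ADAPTER_MODIFY / KVM_S390_IO_ADAPTER_MAP before any
 * adapter interrupt can be delivered. "flic_fd" and the guest addresses
 * below are illustrative.
 *
 *	struct kvm_s390_io_adapter ad = {
 *		.id = 0, .isc = 3, .maskable = 1, .swap = 0,
 *	};
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_ADAPTER_REGISTER,
 *		.addr  = (__u64)(unsigned long)&ad,
 *	};
 *	ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 *	struct kvm_s390_io_adapter_req req = {
 *		.id = 0, .type = KVM_S390_IO_ADAPTER_MAP,
 *		.addr = guest_indicator_addr,
 *	};
 *	attr.group = KVM_DEV_FLIC_ADAPTER_MODIFY;
 *	attr.addr  = (__u64)(unsigned long)&req;
 *	ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */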
static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;
	unsigned int i;
	struct kvm_vcpu *vcpu;

	switch (attr->group) {
	case KVM_DEV_FLIC_ENQUEUE:
		r = enqueue_floating_irq(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IRQS:
		kvm_s390_clear_float_irqs(dev->kvm);
		break;
	case KVM_DEV_FLIC_APF_ENABLE:
		dev->kvm->arch.gmap->pfault_enabled = 1;
		break;
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
		dev->kvm->arch.gmap->pfault_enabled = 0;
		/*
		 * Make sure no async faults are in transition when
		 * clearing the queues. So we don't need to worry
		 * about late coming workers.
		 */
		synchronize_srcu(&dev->kvm->srcu);
		kvm_for_each_vcpu(i, vcpu, dev->kvm)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
		r = register_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
		r = modify_io_adapter(dev, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static int flic_create(struct kvm_device *dev, u32 type)
{
	if (!dev)
		return -EINVAL;
	if (dev->kvm->arch.flic)
		return -EINVAL;
	dev->kvm->arch.flic = dev;
	return 0;
}

static void flic_destroy(struct kvm_device *dev)
{
	dev->kvm->arch.flic = NULL;
	kfree(dev);
}

/* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,
	.set_attr = flic_set_attr,
	.create = flic_create,
	.destroy = flic_destroy,
};
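/*
 * Usage note (userspace sketch): the flic is instantiated through the
 * generic KVM device API on the VM file descriptor; the resulting fd is
 * what the KVM_GET/SET_DEVICE_ATTR calls above operate on. "vm_fd" and
 * "flic_fd" are illustrative.
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_FLIC };
 *
 *	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) == 0)
 *		flic_fd = cd.fd;
 *
 * Only one flic per VM is allowed; a second creation attempt fails with
 * -EINVAL (see flic_create() above).
 */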
static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
{
	unsigned long bit;

	bit = bit_nr + (addr % PAGE_SIZE) * 8;

	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}

static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
					  u64 addr)
{
	struct s390_map_info *map;

	if (!adapter)
		return NULL;

	list_for_each_entry(map, &adapter->maps, list) {
		if (map->guest_addr == addr)
			return map;
	}
	return NULL;
}

static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct s390_map_info *info;
	void *map;

	info = get_map_info(adapter, adapter_int->ind_addr);
	if (!info)
		return -1;
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	idx = srcu_read_lock(&kvm->srcu);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	info = get_map_info(adapter, adapter_int->summary_addr);
	if (!info) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -1;
	}
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
			  adapter->swap);
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	srcu_read_unlock(&kvm->srcu, idx);
	return summary_set ? 0 : 1;
}

/*
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	int ret;
	struct s390_io_adapter *adapter;

	/* We're only interested in the 0->1 transition. */
	if (!level)
		return 0;
	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
	if (!adapter)
		return -1;
	down_read(&adapter->maps_lock);
	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
	up_read(&adapter->maps_lock);
	if ((ret > 0) && !adapter->masked) {
		struct kvm_s390_interrupt s390int = {
			.type = KVM_S390_INT_IO(1, 0, 0, 0),
			.parm = 0,
			.parm64 = (adapter->isc << 27) | 0x80000000,
		};
		ret = kvm_s390_inject_vm(kvm, &s390int);
		if (ret == 0)
			ret = 1;
	}
	return ret;
}

int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int ret;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_S390_ADAPTER:
		e->set = set_adapter_int;
		e->adapter.summary_addr = ue->u.adapter.summary_addr;
		e->adapter.ind_addr = ue->u.adapter.ind_addr;
		e->adapter.summary_offset = ue->u.adapter.summary_offset;
		e->adapter.ind_offset = ue->u.adapter.ind_offset;
		e->adapter.adapter_id = ue->u.adapter.adapter_id;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	return -EINVAL;
}
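/*
 * Usage note (userspace sketch): adapter interrupts come in through the irq
 * routing code, so userspace first installs a routing entry of type
 * KVM_IRQ_ROUTING_S390_ADAPTER and then raises the GSI (for instance via an
 * irqfd). The GSI number, bit offsets and addresses below are illustrative.
 *
 *	struct kvm_irq_routing_entry e = {
 *		.gsi  = gsi,
 *		.type = KVM_IRQ_ROUTING_S390_ADAPTER,
 *		.u.adapter = {
 *			.ind_addr       = ind_page_addr,
 *			.summary_addr   = summary_page_addr,
 *			.ind_offset     = ind_bit,
 *			.summary_offset = summary_bit,
 *			.adapter_id     = 0,
 *		},
 *	};
 *
 * A batch of such entries is handed to the kernel with KVM_SET_GSI_ROUTING
 * on the VM fd; kvm_set_routing_entry() above wires them to set_adapter_int().
 */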
int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq *buf;
	int r = 0;
	int n;

	buf = vmalloc(len);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user((void *) buf, irqstate, len)) {
		r = -EFAULT;
		goto out_free;
	}

	/*
	 * Don't allow setting the interrupt state
	 * when there are already interrupts pending
	 */
	spin_lock(&li->lock);
	if (li->pending_irqs) {
		r = -EBUSY;
		goto out_unlock;
	}

	for (n = 0; n < len / sizeof(*buf); n++) {
		r = do_inject_vcpu(vcpu, &buf[n]);
		if (r)
			break;
	}

out_unlock:
	spin_unlock(&li->lock);
out_free:
	vfree(buf);

	return r;
}

static void store_local_irq(struct kvm_s390_local_interrupt *li,
			    struct kvm_s390_irq *irq,
			    unsigned long irq_type)
{
	switch (irq_type) {
	case IRQ_PEND_MCHK_EX:
	case IRQ_PEND_MCHK_REP:
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = li->irq.mchk;
		break;
	case IRQ_PEND_PROG:
		irq->type = KVM_S390_PROGRAM_INT;
		irq->u.pgm = li->irq.pgm;
		break;
	case IRQ_PEND_PFAULT_INIT:
		irq->type = KVM_S390_INT_PFAULT_INIT;
		irq->u.ext = li->irq.ext;
		break;
	case IRQ_PEND_EXT_EXTERNAL:
		irq->type = KVM_S390_INT_EXTERNAL_CALL;
		irq->u.extcall = li->irq.extcall;
		break;
	case IRQ_PEND_EXT_CLOCK_COMP:
		irq->type = KVM_S390_INT_CLOCK_COMP;
		break;
	case IRQ_PEND_EXT_CPU_TIMER:
		irq->type = KVM_S390_INT_CPU_TIMER;
		break;
	case IRQ_PEND_SIGP_STOP:
		irq->type = KVM_S390_SIGP_STOP;
		irq->u.stop = li->irq.stop;
		break;
	case IRQ_PEND_RESTART:
		irq->type = KVM_S390_RESTART;
		break;
	case IRQ_PEND_SET_PREFIX:
		irq->type = KVM_S390_SIGP_SET_PREFIX;
		irq->u.prefix = li->irq.prefix;
		break;
	}
}

int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
{
	int scn;
	unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	unsigned long pending_irqs;
	struct kvm_s390_irq irq;
	unsigned long irq_type;
	int cpuaddr;
	int n = 0;

	spin_lock(&li->lock);
	pending_irqs = li->pending_irqs;
	memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
	       sizeof(sigp_emerg_pending));
	spin_unlock(&li->lock);

	for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
		memset(&irq, 0, sizeof(irq));
		if (irq_type == IRQ_PEND_EXT_EMERGENCY)
			continue;
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
		for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
			memset(&irq, 0, sizeof(irq));
			if (n + sizeof(irq) > len)
				return -ENOBUFS;
			irq.type = KVM_S390_INT_EMERGENCY;
			irq.u.emerg.code = cpuaddr;
			if (copy_to_user(&buf[n], &irq, sizeof(irq)))
				return -EFAULT;
			n += sizeof(irq);
		}
	}

	if (sca_ext_call_pending(vcpu, &scn)) {
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		memset(&irq, 0, sizeof(irq));
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.u.extcall.code = scn;
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	return n;
}
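/*
 * Usage note (userspace sketch): kvm_s390_get_irq_state() and
 * kvm_s390_set_irq_state() back the KVM_S390_GET_IRQ_STATE and
 * KVM_S390_SET_IRQ_STATE vcpu ioctls used for saving and restoring pending
 * local interrupts, e.g. across migration. "vcpu_fd", "buf" and "bufsize"
 * are illustrative.
 *
 *	struct kvm_s390_irq_state irq_state = {
 *		.buf = (__u64)(unsigned long)buf,
 *		.len = bufsize,
 *	};
 *	int n = ioctl(vcpu_fd, KVM_S390_GET_IRQ_STATE, &irq_state);
 *
 * As in the code above, a positive return is the number of bytes stored,
 * ENOBUFS asks for a larger buffer, and the same structure (with .len set to
 * the stored size) is later replayed with KVM_S390_SET_IRQ_STATE, which
 * refuses to run if the vcpu already has interrupts pending (-EBUSY).
 */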