1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * handling kvm guest interrupts 4 * 5 * Copyright IBM Corp. 2008, 2015 6 * 7 * Author(s): Carsten Otte <cotte@de.ibm.com> 8 */ 9 10 #include <linux/interrupt.h> 11 #include <linux/kvm_host.h> 12 #include <linux/hrtimer.h> 13 #include <linux/mmu_context.h> 14 #include <linux/signal.h> 15 #include <linux/slab.h> 16 #include <linux/bitmap.h> 17 #include <linux/vmalloc.h> 18 #include <asm/asm-offsets.h> 19 #include <asm/dis.h> 20 #include <linux/uaccess.h> 21 #include <asm/sclp.h> 22 #include <asm/isc.h> 23 #include <asm/gmap.h> 24 #include <asm/switch_to.h> 25 #include <asm/nmi.h> 26 #include "kvm-s390.h" 27 #include "gaccess.h" 28 #include "trace-s390.h" 29 30 #define PFAULT_INIT 0x0600 31 #define PFAULT_DONE 0x0680 32 #define VIRTIO_PARAM 0x0d00 33 34 /* handle external calls via sigp interpretation facility */ 35 static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id) 36 { 37 int c, scn; 38 39 if (!kvm_s390_test_cpuflags(vcpu, CPUSTAT_ECALL_PEND)) 40 return 0; 41 42 BUG_ON(!kvm_s390_use_sca_entries()); 43 read_lock(&vcpu->kvm->arch.sca_lock); 44 if (vcpu->kvm->arch.use_esca) { 45 struct esca_block *sca = vcpu->kvm->arch.sca; 46 union esca_sigp_ctrl sigp_ctrl = 47 sca->cpu[vcpu->vcpu_id].sigp_ctrl; 48 49 c = sigp_ctrl.c; 50 scn = sigp_ctrl.scn; 51 } else { 52 struct bsca_block *sca = vcpu->kvm->arch.sca; 53 union bsca_sigp_ctrl sigp_ctrl = 54 sca->cpu[vcpu->vcpu_id].sigp_ctrl; 55 56 c = sigp_ctrl.c; 57 scn = sigp_ctrl.scn; 58 } 59 read_unlock(&vcpu->kvm->arch.sca_lock); 60 61 if (src_id) 62 *src_id = scn; 63 64 return c; 65 } 66 67 static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id) 68 { 69 int expect, rc; 70 71 BUG_ON(!kvm_s390_use_sca_entries()); 72 read_lock(&vcpu->kvm->arch.sca_lock); 73 if (vcpu->kvm->arch.use_esca) { 74 struct esca_block *sca = vcpu->kvm->arch.sca; 75 union esca_sigp_ctrl *sigp_ctrl = 76 &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); 77 union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl; 78 79 new_val.scn = src_id; 80 new_val.c = 1; 81 old_val.c = 0; 82 83 expect = old_val.value; 84 rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value); 85 } else { 86 struct bsca_block *sca = vcpu->kvm->arch.sca; 87 union bsca_sigp_ctrl *sigp_ctrl = 88 &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); 89 union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl; 90 91 new_val.scn = src_id; 92 new_val.c = 1; 93 old_val.c = 0; 94 95 expect = old_val.value; 96 rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value); 97 } 98 read_unlock(&vcpu->kvm->arch.sca_lock); 99 100 if (rc != expect) { 101 /* another external call is pending */ 102 return -EBUSY; 103 } 104 kvm_s390_set_cpuflags(vcpu, CPUSTAT_ECALL_PEND); 105 return 0; 106 } 107 108 static void sca_clear_ext_call(struct kvm_vcpu *vcpu) 109 { 110 int rc, expect; 111 112 if (!kvm_s390_use_sca_entries()) 113 return; 114 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_ECALL_PEND); 115 read_lock(&vcpu->kvm->arch.sca_lock); 116 if (vcpu->kvm->arch.use_esca) { 117 struct esca_block *sca = vcpu->kvm->arch.sca; 118 union esca_sigp_ctrl *sigp_ctrl = 119 &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); 120 union esca_sigp_ctrl old = *sigp_ctrl; 121 122 expect = old.value; 123 rc = cmpxchg(&sigp_ctrl->value, old.value, 0); 124 } else { 125 struct bsca_block *sca = vcpu->kvm->arch.sca; 126 union bsca_sigp_ctrl *sigp_ctrl = 127 &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); 128 union bsca_sigp_ctrl old = *sigp_ctrl; 129 130 expect = old.value; 131 rc = cmpxchg(&sigp_ctrl->value, old.value, 0); 132 } 133 
read_unlock(&vcpu->kvm->arch.sca_lock); 134 WARN_ON(rc != expect); /* cannot clear? */ 135 } 136 137 int psw_extint_disabled(struct kvm_vcpu *vcpu) 138 { 139 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT); 140 } 141 142 static int psw_ioint_disabled(struct kvm_vcpu *vcpu) 143 { 144 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO); 145 } 146 147 static int psw_mchk_disabled(struct kvm_vcpu *vcpu) 148 { 149 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK); 150 } 151 152 static int psw_interrupts_disabled(struct kvm_vcpu *vcpu) 153 { 154 return psw_extint_disabled(vcpu) && 155 psw_ioint_disabled(vcpu) && 156 psw_mchk_disabled(vcpu); 157 } 158 159 static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu) 160 { 161 if (psw_extint_disabled(vcpu) || 162 !(vcpu->arch.sie_block->gcr[0] & 0x800ul)) 163 return 0; 164 if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu)) 165 /* No timer interrupts when single stepping */ 166 return 0; 167 return 1; 168 } 169 170 static int ckc_irq_pending(struct kvm_vcpu *vcpu) 171 { 172 const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm); 173 const u64 ckc = vcpu->arch.sie_block->ckc; 174 175 if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) { 176 if ((s64)ckc >= (s64)now) 177 return 0; 178 } else if (ckc >= now) { 179 return 0; 180 } 181 return ckc_interrupts_enabled(vcpu); 182 } 183 184 static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu) 185 { 186 return !psw_extint_disabled(vcpu) && 187 (vcpu->arch.sie_block->gcr[0] & 0x400ul); 188 } 189 190 static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu) 191 { 192 if (!cpu_timer_interrupts_enabled(vcpu)) 193 return 0; 194 return kvm_s390_get_cpu_timer(vcpu) >> 63; 195 } 196 197 static uint64_t isc_to_isc_bits(int isc) 198 { 199 return (0x80 >> isc) << 24; 200 } 201 202 static inline u32 isc_to_int_word(u8 isc) 203 { 204 return ((u32)isc << 27) | 0x80000000; 205 } 206 207 static inline u8 int_word_to_isc(u32 int_word) 208 { 209 return (int_word & 0x38000000) >> 27; 210 } 211 212 /* 213 * To use atomic bitmap functions, we have to provide a bitmap address 214 * that is u64 aligned. However, the ipm might be u32 aligned. 215 * Therefore, we logically start the bitmap at the very beginning of the 216 * struct and fixup the bit number. 
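 * The _inv bitops use MSB-0 (big-endian) bit numbering, so bit
 * IPM_BIT_OFFSET + gisc addresses that GISC's bit within the ipm
 * byte, GISC 0 being its leftmost (most significant) bit.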
217 */ 218 #define IPM_BIT_OFFSET (offsetof(struct kvm_s390_gisa, ipm) * BITS_PER_BYTE) 219 220 static inline void kvm_s390_gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc) 221 { 222 set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa); 223 } 224 225 static inline u8 kvm_s390_gisa_get_ipm(struct kvm_s390_gisa *gisa) 226 { 227 return READ_ONCE(gisa->ipm); 228 } 229 230 static inline void kvm_s390_gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc) 231 { 232 clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa); 233 } 234 235 static inline int kvm_s390_gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc) 236 { 237 return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa); 238 } 239 240 static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu) 241 { 242 return vcpu->kvm->arch.float_int.pending_irqs | 243 vcpu->arch.local_int.pending_irqs; 244 } 245 246 static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu) 247 { 248 return pending_irqs_no_gisa(vcpu) | 249 kvm_s390_gisa_get_ipm(vcpu->kvm->arch.gisa) << IRQ_PEND_IO_ISC_7; 250 } 251 252 static inline int isc_to_irq_type(unsigned long isc) 253 { 254 return IRQ_PEND_IO_ISC_0 - isc; 255 } 256 257 static inline int irq_type_to_isc(unsigned long irq_type) 258 { 259 return IRQ_PEND_IO_ISC_0 - irq_type; 260 } 261 262 static unsigned long disable_iscs(struct kvm_vcpu *vcpu, 263 unsigned long active_mask) 264 { 265 int i; 266 267 for (i = 0; i <= MAX_ISC; i++) 268 if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i))) 269 active_mask &= ~(1UL << (isc_to_irq_type(i))); 270 271 return active_mask; 272 } 273 274 static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu) 275 { 276 unsigned long active_mask; 277 278 active_mask = pending_irqs(vcpu); 279 if (!active_mask) 280 return 0; 281 282 if (psw_extint_disabled(vcpu)) 283 active_mask &= ~IRQ_PEND_EXT_MASK; 284 if (psw_ioint_disabled(vcpu)) 285 active_mask &= ~IRQ_PEND_IO_MASK; 286 else 287 active_mask = disable_iscs(vcpu, active_mask); 288 if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul)) 289 __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask); 290 if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul)) 291 __clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask); 292 if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul)) 293 __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask); 294 if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul)) 295 __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask); 296 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul)) 297 __clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask); 298 if (psw_mchk_disabled(vcpu)) 299 active_mask &= ~IRQ_PEND_MCHK_MASK; 300 /* 301 * Check both floating and local interrupt's cr14 because 302 * bit IRQ_PEND_MCHK_REP could be set in both cases. 303 */ 304 if (!(vcpu->arch.sie_block->gcr[14] & 305 (vcpu->kvm->arch.float_int.mchk.cr14 | 306 vcpu->arch.local_int.irq.mchk.cr14))) 307 __clear_bit(IRQ_PEND_MCHK_REP, &active_mask); 308 309 /* 310 * STOP irqs will never be actively delivered. They are triggered via 311 * intercept requests and cleared when the stop intercept is performed. 
312 */ 313 __clear_bit(IRQ_PEND_SIGP_STOP, &active_mask); 314 315 return active_mask; 316 } 317 318 static void __set_cpu_idle(struct kvm_vcpu *vcpu) 319 { 320 kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT); 321 set_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask); 322 } 323 324 static void __unset_cpu_idle(struct kvm_vcpu *vcpu) 325 { 326 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT); 327 clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask); 328 } 329 330 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) 331 { 332 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IO_INT | CPUSTAT_EXT_INT | 333 CPUSTAT_STOP_INT); 334 vcpu->arch.sie_block->lctl = 0x0000; 335 vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT); 336 337 if (guestdbg_enabled(vcpu)) { 338 vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 | 339 LCTL_CR10 | LCTL_CR11); 340 vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT); 341 } 342 } 343 344 static void set_intercept_indicators_io(struct kvm_vcpu *vcpu) 345 { 346 if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK)) 347 return; 348 else if (psw_ioint_disabled(vcpu)) 349 kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT); 350 else 351 vcpu->arch.sie_block->lctl |= LCTL_CR6; 352 } 353 354 static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu) 355 { 356 if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK)) 357 return; 358 if (psw_extint_disabled(vcpu)) 359 kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT); 360 else 361 vcpu->arch.sie_block->lctl |= LCTL_CR0; 362 } 363 364 static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu) 365 { 366 if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK)) 367 return; 368 if (psw_mchk_disabled(vcpu)) 369 vcpu->arch.sie_block->ictl |= ICTL_LPSW; 370 else 371 vcpu->arch.sie_block->lctl |= LCTL_CR14; 372 } 373 374 static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu) 375 { 376 if (kvm_s390_is_stop_irq_pending(vcpu)) 377 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT); 378 } 379 380 /* Set interception request for non-deliverable interrupts */ 381 static void set_intercept_indicators(struct kvm_vcpu *vcpu) 382 { 383 set_intercept_indicators_io(vcpu); 384 set_intercept_indicators_ext(vcpu); 385 set_intercept_indicators_mchk(vcpu); 386 set_intercept_indicators_stop(vcpu); 387 } 388 389 static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu) 390 { 391 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 392 int rc; 393 394 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER, 395 0, 0); 396 397 rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER, 398 (u16 *)__LC_EXT_INT_CODE); 399 rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); 400 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 401 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 402 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 403 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 404 clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); 405 return rc ? 
-EFAULT : 0; 406 } 407 408 static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu) 409 { 410 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 411 int rc; 412 413 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP, 414 0, 0); 415 416 rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP, 417 (u16 __user *)__LC_EXT_INT_CODE); 418 rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); 419 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 420 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 421 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 422 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 423 clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); 424 return rc ? -EFAULT : 0; 425 } 426 427 static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu) 428 { 429 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 430 struct kvm_s390_ext_info ext; 431 int rc; 432 433 spin_lock(&li->lock); 434 ext = li->irq.ext; 435 clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs); 436 li->irq.ext.ext_params2 = 0; 437 spin_unlock(&li->lock); 438 439 VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx", 440 ext.ext_params2); 441 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 442 KVM_S390_INT_PFAULT_INIT, 443 0, ext.ext_params2); 444 445 rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE); 446 rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR); 447 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 448 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 449 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 450 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 451 rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2); 452 return rc ? -EFAULT : 0; 453 } 454 455 static int __write_machine_check(struct kvm_vcpu *vcpu, 456 struct kvm_s390_mchk_info *mchk) 457 { 458 unsigned long ext_sa_addr; 459 unsigned long lc; 460 freg_t fprs[NUM_FPRS]; 461 union mci mci; 462 int rc; 463 464 mci.val = mchk->mcic; 465 /* take care of lazy register loading */ 466 save_fpu_regs(); 467 save_access_regs(vcpu->run->s.regs.acrs); 468 if (MACHINE_HAS_GS && vcpu->arch.gs_enabled) 469 save_gs_cb(current->thread.gs_cb); 470 471 /* Extended save area */ 472 rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr, 473 sizeof(unsigned long)); 474 /* Only bits 0 through 63-LC are used for address formation */ 475 lc = ext_sa_addr & MCESA_LC_MASK; 476 if (test_kvm_facility(vcpu->kvm, 133)) { 477 switch (lc) { 478 case 0: 479 case 10: 480 ext_sa_addr &= ~0x3ffUL; 481 break; 482 case 11: 483 ext_sa_addr &= ~0x7ffUL; 484 break; 485 case 12: 486 ext_sa_addr &= ~0xfffUL; 487 break; 488 default: 489 ext_sa_addr = 0; 490 break; 491 } 492 } else { 493 ext_sa_addr &= ~0x3ffUL; 494 } 495 496 if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) { 497 if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs, 498 512)) 499 mci.vr = 0; 500 } else { 501 mci.vr = 0; 502 } 503 if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133) 504 && (lc == 11 || lc == 12)) { 505 if (write_guest_abs(vcpu, ext_sa_addr + 1024, 506 &vcpu->run->s.regs.gscb, 32)) 507 mci.gs = 0; 508 } else { 509 mci.gs = 0; 510 } 511 512 /* General interruption information */ 513 rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID); 514 rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, 515 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 516 rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, 517 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 518 rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE); 519 520 /* 
Register-save areas */ 521 if (MACHINE_HAS_VX) { 522 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs); 523 rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128); 524 } else { 525 rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, 526 vcpu->run->s.regs.fprs, 128); 527 } 528 rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA, 529 vcpu->run->s.regs.gprs, 128); 530 rc |= put_guest_lc(vcpu, current->thread.fpu.fpc, 531 (u32 __user *) __LC_FP_CREG_SAVE_AREA); 532 rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr, 533 (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA); 534 rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu), 535 (u64 __user *) __LC_CPU_TIMER_SAVE_AREA); 536 rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8, 537 (u64 __user *) __LC_CLOCK_COMP_SAVE_AREA); 538 rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA, 539 &vcpu->run->s.regs.acrs, 64); 540 rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA, 541 &vcpu->arch.sie_block->gcr, 128); 542 543 /* Extended interruption information */ 544 rc |= put_guest_lc(vcpu, mchk->ext_damage_code, 545 (u32 __user *) __LC_EXT_DAMAGE_CODE); 546 rc |= put_guest_lc(vcpu, mchk->failing_storage_address, 547 (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR); 548 rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout, 549 sizeof(mchk->fixed_logout)); 550 return rc ? -EFAULT : 0; 551 } 552 553 static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu) 554 { 555 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; 556 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 557 struct kvm_s390_mchk_info mchk = {}; 558 int deliver = 0; 559 int rc = 0; 560 561 spin_lock(&fi->lock); 562 spin_lock(&li->lock); 563 if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) || 564 test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) { 565 /* 566 * If there was an exigent machine check pending, then any 567 * repressible machine checks that might have been pending 568 * are indicated along with it, so always clear bits for 569 * repressible and exigent interrupts 570 */ 571 mchk = li->irq.mchk; 572 clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs); 573 clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs); 574 memset(&li->irq.mchk, 0, sizeof(mchk)); 575 deliver = 1; 576 } 577 /* 578 * We indicate floating repressible conditions along with 579 * other pending conditions. Channel Report Pending and Channel 580 * Subsystem damage are the only two and and are indicated by 581 * bits in mcic and masked in cr14. 
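 * Any such pending floating condition is therefore or'ed below into
 * the machine check information before it is delivered in one go.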
582 */ 583 if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) { 584 mchk.mcic |= fi->mchk.mcic; 585 mchk.cr14 |= fi->mchk.cr14; 586 memset(&fi->mchk, 0, sizeof(mchk)); 587 deliver = 1; 588 } 589 spin_unlock(&li->lock); 590 spin_unlock(&fi->lock); 591 592 if (deliver) { 593 VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx", 594 mchk.mcic); 595 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 596 KVM_S390_MCHK, 597 mchk.cr14, mchk.mcic); 598 rc = __write_machine_check(vcpu, &mchk); 599 } 600 return rc; 601 } 602 603 static int __must_check __deliver_restart(struct kvm_vcpu *vcpu) 604 { 605 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 606 int rc; 607 608 VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart"); 609 vcpu->stat.deliver_restart_signal++; 610 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0); 611 612 rc = write_guest_lc(vcpu, 613 offsetof(struct lowcore, restart_old_psw), 614 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 615 rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw), 616 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 617 clear_bit(IRQ_PEND_RESTART, &li->pending_irqs); 618 return rc ? -EFAULT : 0; 619 } 620 621 static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu) 622 { 623 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 624 struct kvm_s390_prefix_info prefix; 625 626 spin_lock(&li->lock); 627 prefix = li->irq.prefix; 628 li->irq.prefix.address = 0; 629 clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs); 630 spin_unlock(&li->lock); 631 632 vcpu->stat.deliver_prefix_signal++; 633 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 634 KVM_S390_SIGP_SET_PREFIX, 635 prefix.address, 0); 636 637 kvm_s390_set_prefix(vcpu, prefix.address); 638 return 0; 639 } 640 641 static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu) 642 { 643 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 644 int rc; 645 int cpu_addr; 646 647 spin_lock(&li->lock); 648 cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS); 649 clear_bit(cpu_addr, li->sigp_emerg_pending); 650 if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS)) 651 clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); 652 spin_unlock(&li->lock); 653 654 VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg"); 655 vcpu->stat.deliver_emergency_signal++; 656 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, 657 cpu_addr, 0); 658 659 rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG, 660 (u16 *)__LC_EXT_INT_CODE); 661 rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR); 662 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 663 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 664 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 665 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 666 return rc ? 
-EFAULT : 0; 667 } 668 669 static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu) 670 { 671 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 672 struct kvm_s390_extcall_info extcall; 673 int rc; 674 675 spin_lock(&li->lock); 676 extcall = li->irq.extcall; 677 li->irq.extcall.code = 0; 678 clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs); 679 spin_unlock(&li->lock); 680 681 VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call"); 682 vcpu->stat.deliver_external_call++; 683 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 684 KVM_S390_INT_EXTERNAL_CALL, 685 extcall.code, 0); 686 687 rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL, 688 (u16 *)__LC_EXT_INT_CODE); 689 rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR); 690 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 691 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 692 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw, 693 sizeof(psw_t)); 694 return rc ? -EFAULT : 0; 695 } 696 697 static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) 698 { 699 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 700 struct kvm_s390_pgm_info pgm_info; 701 int rc = 0, nullifying = false; 702 u16 ilen; 703 704 spin_lock(&li->lock); 705 pgm_info = li->irq.pgm; 706 clear_bit(IRQ_PEND_PROG, &li->pending_irqs); 707 memset(&li->irq.pgm, 0, sizeof(pgm_info)); 708 spin_unlock(&li->lock); 709 710 ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK; 711 VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d", 712 pgm_info.code, ilen); 713 vcpu->stat.deliver_program_int++; 714 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, 715 pgm_info.code, 0); 716 717 switch (pgm_info.code & ~PGM_PER) { 718 case PGM_AFX_TRANSLATION: 719 case PGM_ASX_TRANSLATION: 720 case PGM_EX_TRANSLATION: 721 case PGM_LFX_TRANSLATION: 722 case PGM_LSTE_SEQUENCE: 723 case PGM_LSX_TRANSLATION: 724 case PGM_LX_TRANSLATION: 725 case PGM_PRIMARY_AUTHORITY: 726 case PGM_SECONDARY_AUTHORITY: 727 nullifying = true; 728 /* fall through */ 729 case PGM_SPACE_SWITCH: 730 rc = put_guest_lc(vcpu, pgm_info.trans_exc_code, 731 (u64 *)__LC_TRANS_EXC_CODE); 732 break; 733 case PGM_ALEN_TRANSLATION: 734 case PGM_ALE_SEQUENCE: 735 case PGM_ASTE_INSTANCE: 736 case PGM_ASTE_SEQUENCE: 737 case PGM_ASTE_VALIDITY: 738 case PGM_EXTENDED_AUTHORITY: 739 rc = put_guest_lc(vcpu, pgm_info.exc_access_id, 740 (u8 *)__LC_EXC_ACCESS_ID); 741 nullifying = true; 742 break; 743 case PGM_ASCE_TYPE: 744 case PGM_PAGE_TRANSLATION: 745 case PGM_REGION_FIRST_TRANS: 746 case PGM_REGION_SECOND_TRANS: 747 case PGM_REGION_THIRD_TRANS: 748 case PGM_SEGMENT_TRANSLATION: 749 rc = put_guest_lc(vcpu, pgm_info.trans_exc_code, 750 (u64 *)__LC_TRANS_EXC_CODE); 751 rc |= put_guest_lc(vcpu, pgm_info.exc_access_id, 752 (u8 *)__LC_EXC_ACCESS_ID); 753 rc |= put_guest_lc(vcpu, pgm_info.op_access_id, 754 (u8 *)__LC_OP_ACCESS_ID); 755 nullifying = true; 756 break; 757 case PGM_MONITOR: 758 rc = put_guest_lc(vcpu, pgm_info.mon_class_nr, 759 (u16 *)__LC_MON_CLASS_NR); 760 rc |= put_guest_lc(vcpu, pgm_info.mon_code, 761 (u64 *)__LC_MON_CODE); 762 break; 763 case PGM_VECTOR_PROCESSING: 764 case PGM_DATA: 765 rc = put_guest_lc(vcpu, pgm_info.data_exc_code, 766 (u32 *)__LC_DATA_EXC_CODE); 767 break; 768 case PGM_PROTECTION: 769 rc = put_guest_lc(vcpu, pgm_info.trans_exc_code, 770 (u64 *)__LC_TRANS_EXC_CODE); 771 rc |= put_guest_lc(vcpu, pgm_info.exc_access_id, 772 (u8 *)__LC_EXC_ACCESS_ID); 773 break; 774 case PGM_STACK_FULL: 775 case PGM_STACK_EMPTY: 776 case 
PGM_STACK_SPECIFICATION: 777 case PGM_STACK_TYPE: 778 case PGM_STACK_OPERATION: 779 case PGM_TRACE_TABEL: 780 case PGM_CRYPTO_OPERATION: 781 nullifying = true; 782 break; 783 } 784 785 if (pgm_info.code & PGM_PER) { 786 rc |= put_guest_lc(vcpu, pgm_info.per_code, 787 (u8 *) __LC_PER_CODE); 788 rc |= put_guest_lc(vcpu, pgm_info.per_atmid, 789 (u8 *)__LC_PER_ATMID); 790 rc |= put_guest_lc(vcpu, pgm_info.per_address, 791 (u64 *) __LC_PER_ADDRESS); 792 rc |= put_guest_lc(vcpu, pgm_info.per_access_id, 793 (u8 *) __LC_PER_ACCESS_ID); 794 } 795 796 if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND)) 797 kvm_s390_rewind_psw(vcpu, ilen); 798 799 /* bit 1+2 of the target are the ilc, so we can directly use ilen */ 800 rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC); 801 rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea, 802 (u64 *) __LC_LAST_BREAK); 803 rc |= put_guest_lc(vcpu, pgm_info.code, 804 (u16 *)__LC_PGM_INT_CODE); 805 rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW, 806 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 807 rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW, 808 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 809 return rc ? -EFAULT : 0; 810 } 811 812 static int __must_check __deliver_service(struct kvm_vcpu *vcpu) 813 { 814 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; 815 struct kvm_s390_ext_info ext; 816 int rc = 0; 817 818 spin_lock(&fi->lock); 819 if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) { 820 spin_unlock(&fi->lock); 821 return 0; 822 } 823 ext = fi->srv_signal; 824 memset(&fi->srv_signal, 0, sizeof(ext)); 825 clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs); 826 spin_unlock(&fi->lock); 827 828 VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x", 829 ext.ext_params); 830 vcpu->stat.deliver_service_signal++; 831 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE, 832 ext.ext_params, 0); 833 834 rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE); 835 rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); 836 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 837 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 838 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 839 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 840 rc |= put_guest_lc(vcpu, ext.ext_params, 841 (u32 *)__LC_EXT_PARAMS); 842 843 return rc ? 
-EFAULT : 0; 844 } 845 846 static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu) 847 { 848 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; 849 struct kvm_s390_interrupt_info *inti; 850 int rc = 0; 851 852 spin_lock(&fi->lock); 853 inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT], 854 struct kvm_s390_interrupt_info, 855 list); 856 if (inti) { 857 list_del(&inti->list); 858 fi->counters[FIRQ_CNTR_PFAULT] -= 1; 859 } 860 if (list_empty(&fi->lists[FIRQ_LIST_PFAULT])) 861 clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs); 862 spin_unlock(&fi->lock); 863 864 if (inti) { 865 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 866 KVM_S390_INT_PFAULT_DONE, 0, 867 inti->ext.ext_params2); 868 VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx", 869 inti->ext.ext_params2); 870 871 rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, 872 (u16 *)__LC_EXT_INT_CODE); 873 rc |= put_guest_lc(vcpu, PFAULT_DONE, 874 (u16 *)__LC_EXT_CPU_ADDR); 875 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 876 &vcpu->arch.sie_block->gpsw, 877 sizeof(psw_t)); 878 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 879 &vcpu->arch.sie_block->gpsw, 880 sizeof(psw_t)); 881 rc |= put_guest_lc(vcpu, inti->ext.ext_params2, 882 (u64 *)__LC_EXT_PARAMS2); 883 kfree(inti); 884 } 885 return rc ? -EFAULT : 0; 886 } 887 888 static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu) 889 { 890 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; 891 struct kvm_s390_interrupt_info *inti; 892 int rc = 0; 893 894 spin_lock(&fi->lock); 895 inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO], 896 struct kvm_s390_interrupt_info, 897 list); 898 if (inti) { 899 VCPU_EVENT(vcpu, 4, 900 "deliver: virtio parm: 0x%x,parm64: 0x%llx", 901 inti->ext.ext_params, inti->ext.ext_params2); 902 vcpu->stat.deliver_virtio_interrupt++; 903 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 904 inti->type, 905 inti->ext.ext_params, 906 inti->ext.ext_params2); 907 list_del(&inti->list); 908 fi->counters[FIRQ_CNTR_VIRTIO] -= 1; 909 } 910 if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO])) 911 clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs); 912 spin_unlock(&fi->lock); 913 914 if (inti) { 915 rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, 916 (u16 *)__LC_EXT_INT_CODE); 917 rc |= put_guest_lc(vcpu, VIRTIO_PARAM, 918 (u16 *)__LC_EXT_CPU_ADDR); 919 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 920 &vcpu->arch.sie_block->gpsw, 921 sizeof(psw_t)); 922 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 923 &vcpu->arch.sie_block->gpsw, 924 sizeof(psw_t)); 925 rc |= put_guest_lc(vcpu, inti->ext.ext_params, 926 (u32 *)__LC_EXT_PARAMS); 927 rc |= put_guest_lc(vcpu, inti->ext.ext_params2, 928 (u64 *)__LC_EXT_PARAMS2); 929 kfree(inti); 930 } 931 return rc ? -EFAULT : 0; 932 } 933 934 static int __do_deliver_io(struct kvm_vcpu *vcpu, struct kvm_s390_io_info *io) 935 { 936 int rc; 937 938 rc = put_guest_lc(vcpu, io->subchannel_id, (u16 *)__LC_SUBCHANNEL_ID); 939 rc |= put_guest_lc(vcpu, io->subchannel_nr, (u16 *)__LC_SUBCHANNEL_NR); 940 rc |= put_guest_lc(vcpu, io->io_int_parm, (u32 *)__LC_IO_INT_PARM); 941 rc |= put_guest_lc(vcpu, io->io_int_word, (u32 *)__LC_IO_INT_WORD); 942 rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW, 943 &vcpu->arch.sie_block->gpsw, 944 sizeof(psw_t)); 945 rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW, 946 &vcpu->arch.sie_block->gpsw, 947 sizeof(psw_t)); 948 return rc ? 
-EFAULT : 0; 949 } 950 951 static int __must_check __deliver_io(struct kvm_vcpu *vcpu, 952 unsigned long irq_type) 953 { 954 struct list_head *isc_list; 955 struct kvm_s390_float_interrupt *fi; 956 struct kvm_s390_interrupt_info *inti = NULL; 957 struct kvm_s390_io_info io; 958 u32 isc; 959 int rc = 0; 960 961 fi = &vcpu->kvm->arch.float_int; 962 963 spin_lock(&fi->lock); 964 isc = irq_type_to_isc(irq_type); 965 isc_list = &fi->lists[isc]; 966 inti = list_first_entry_or_null(isc_list, 967 struct kvm_s390_interrupt_info, 968 list); 969 if (inti) { 970 if (inti->type & KVM_S390_INT_IO_AI_MASK) 971 VCPU_EVENT(vcpu, 4, "%s", "deliver: I/O (AI)"); 972 else 973 VCPU_EVENT(vcpu, 4, "deliver: I/O %x ss %x schid %04x", 974 inti->io.subchannel_id >> 8, 975 inti->io.subchannel_id >> 1 & 0x3, 976 inti->io.subchannel_nr); 977 978 vcpu->stat.deliver_io_int++; 979 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 980 inti->type, 981 ((__u32)inti->io.subchannel_id << 16) | 982 inti->io.subchannel_nr, 983 ((__u64)inti->io.io_int_parm << 32) | 984 inti->io.io_int_word); 985 list_del(&inti->list); 986 fi->counters[FIRQ_CNTR_IO] -= 1; 987 } 988 if (list_empty(isc_list)) 989 clear_bit(irq_type, &fi->pending_irqs); 990 spin_unlock(&fi->lock); 991 992 if (inti) { 993 rc = __do_deliver_io(vcpu, &(inti->io)); 994 kfree(inti); 995 goto out; 996 } 997 998 if (vcpu->kvm->arch.gisa && 999 kvm_s390_gisa_tac_ipm_gisc(vcpu->kvm->arch.gisa, isc)) { 1000 /* 1001 * in case an adapter interrupt was not delivered 1002 * in SIE context KVM will handle the delivery 1003 */ 1004 VCPU_EVENT(vcpu, 4, "%s isc %u", "deliver: I/O (AI/gisa)", isc); 1005 memset(&io, 0, sizeof(io)); 1006 io.io_int_word = isc_to_int_word(isc); 1007 vcpu->stat.deliver_io_int++; 1008 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, 1009 KVM_S390_INT_IO(1, 0, 0, 0), 1010 ((__u32)io.subchannel_id << 16) | 1011 io.subchannel_nr, 1012 ((__u64)io.io_int_parm << 32) | 1013 io.io_int_word); 1014 rc = __do_deliver_io(vcpu, &io); 1015 } 1016 out: 1017 return rc; 1018 } 1019 1020 /* Check whether an external call is pending (deliverable or not) */ 1021 int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu) 1022 { 1023 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1024 1025 if (!sclp.has_sigpif) 1026 return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs); 1027 1028 return sca_ext_call_pending(vcpu, NULL); 1029 } 1030 1031 int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop) 1032 { 1033 if (deliverable_irqs(vcpu)) 1034 return 1; 1035 1036 if (kvm_cpu_has_pending_timer(vcpu)) 1037 return 1; 1038 1039 /* external call pending and deliverable */ 1040 if (kvm_s390_ext_call_pending(vcpu) && 1041 !psw_extint_disabled(vcpu) && 1042 (vcpu->arch.sie_block->gcr[0] & 0x2000ul)) 1043 return 1; 1044 1045 if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu)) 1046 return 1; 1047 return 0; 1048 } 1049 1050 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) 1051 { 1052 return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu); 1053 } 1054 1055 static u64 __calculate_sltime(struct kvm_vcpu *vcpu) 1056 { 1057 const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm); 1058 const u64 ckc = vcpu->arch.sie_block->ckc; 1059 u64 cputm, sltime = 0; 1060 1061 if (ckc_interrupts_enabled(vcpu)) { 1062 if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) { 1063 if ((s64)now < (s64)ckc) 1064 sltime = tod_to_ns((s64)ckc - (s64)now); 1065 } else if (now < ckc) { 1066 sltime = tod_to_ns(ckc - now); 1067 } 1068 /* already expired */ 1069 if (!sltime) 1070 return 0; 
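		/*
		 * If CPU timer interrupts are enabled as well, sleep only
		 * until whichever of the two sources fires first.
		 */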
1071 if (cpu_timer_interrupts_enabled(vcpu)) { 1072 cputm = kvm_s390_get_cpu_timer(vcpu); 1073 /* already expired? */ 1074 if (cputm >> 63) 1075 return 0; 1076 return min(sltime, tod_to_ns(cputm)); 1077 } 1078 } else if (cpu_timer_interrupts_enabled(vcpu)) { 1079 sltime = kvm_s390_get_cpu_timer(vcpu); 1080 /* already expired? */ 1081 if (sltime >> 63) 1082 return 0; 1083 } 1084 return sltime; 1085 } 1086 1087 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) 1088 { 1089 u64 sltime; 1090 1091 vcpu->stat.exit_wait_state++; 1092 1093 /* fast path */ 1094 if (kvm_arch_vcpu_runnable(vcpu)) 1095 return 0; 1096 1097 if (psw_interrupts_disabled(vcpu)) { 1098 VCPU_EVENT(vcpu, 3, "%s", "disabled wait"); 1099 return -EOPNOTSUPP; /* disabled wait */ 1100 } 1101 1102 if (!ckc_interrupts_enabled(vcpu) && 1103 !cpu_timer_interrupts_enabled(vcpu)) { 1104 VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer"); 1105 __set_cpu_idle(vcpu); 1106 goto no_timer; 1107 } 1108 1109 sltime = __calculate_sltime(vcpu); 1110 if (!sltime) 1111 return 0; 1112 1113 __set_cpu_idle(vcpu); 1114 hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL); 1115 VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime); 1116 no_timer: 1117 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 1118 kvm_vcpu_block(vcpu); 1119 __unset_cpu_idle(vcpu); 1120 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 1121 1122 hrtimer_cancel(&vcpu->arch.ckc_timer); 1123 return 0; 1124 } 1125 1126 void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu) 1127 { 1128 /* 1129 * We cannot move this into the if, as the CPU might be already 1130 * in kvm_vcpu_block without having the waitqueue set (polling) 1131 */ 1132 vcpu->valid_wakeup = true; 1133 /* 1134 * This is mostly to document, that the read in swait_active could 1135 * be moved before other stores, leading to subtle races. 1136 * All current users do not store or use an atomic like update 1137 */ 1138 smp_mb__after_atomic(); 1139 if (swait_active(&vcpu->wq)) { 1140 /* 1141 * The vcpu gave up the cpu voluntarily, mark it as a good 1142 * yield-candidate. 1143 */ 1144 vcpu->preempted = true; 1145 swake_up(&vcpu->wq); 1146 vcpu->stat.halt_wakeup++; 1147 } 1148 /* 1149 * The VCPU might not be sleeping but is executing the VSIE. Let's 1150 * kick it, so it leaves the SIE to process the request. 1151 */ 1152 kvm_s390_vsie_kick(vcpu); 1153 } 1154 1155 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer) 1156 { 1157 struct kvm_vcpu *vcpu; 1158 u64 sltime; 1159 1160 vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer); 1161 sltime = __calculate_sltime(vcpu); 1162 1163 /* 1164 * If the monotonic clock runs faster than the tod clock we might be 1165 * woken up too early and have to go back to sleep to avoid deadlocks. 
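 * If there is remaining sleep time, forward the timer expiry and let
 * the hrtimer be restarted instead of waking up the VCPU.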
1166 */ 1167 if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime))) 1168 return HRTIMER_RESTART; 1169 kvm_s390_vcpu_wakeup(vcpu); 1170 return HRTIMER_NORESTART; 1171 } 1172 1173 void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu) 1174 { 1175 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1176 1177 spin_lock(&li->lock); 1178 li->pending_irqs = 0; 1179 bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS); 1180 memset(&li->irq, 0, sizeof(li->irq)); 1181 spin_unlock(&li->lock); 1182 1183 sca_clear_ext_call(vcpu); 1184 } 1185 1186 int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) 1187 { 1188 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1189 int rc = 0; 1190 unsigned long irq_type; 1191 unsigned long irqs; 1192 1193 __reset_intercept_indicators(vcpu); 1194 1195 /* pending ckc conditions might have been invalidated */ 1196 clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); 1197 if (ckc_irq_pending(vcpu)) 1198 set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); 1199 1200 /* pending cpu timer conditions might have been invalidated */ 1201 clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); 1202 if (cpu_timer_irq_pending(vcpu)) 1203 set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); 1204 1205 while ((irqs = deliverable_irqs(vcpu)) && !rc) { 1206 /* bits are in the reverse order of interrupt priority */ 1207 irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT); 1208 switch (irq_type) { 1209 case IRQ_PEND_IO_ISC_0: 1210 case IRQ_PEND_IO_ISC_1: 1211 case IRQ_PEND_IO_ISC_2: 1212 case IRQ_PEND_IO_ISC_3: 1213 case IRQ_PEND_IO_ISC_4: 1214 case IRQ_PEND_IO_ISC_5: 1215 case IRQ_PEND_IO_ISC_6: 1216 case IRQ_PEND_IO_ISC_7: 1217 rc = __deliver_io(vcpu, irq_type); 1218 break; 1219 case IRQ_PEND_MCHK_EX: 1220 case IRQ_PEND_MCHK_REP: 1221 rc = __deliver_machine_check(vcpu); 1222 break; 1223 case IRQ_PEND_PROG: 1224 rc = __deliver_prog(vcpu); 1225 break; 1226 case IRQ_PEND_EXT_EMERGENCY: 1227 rc = __deliver_emergency_signal(vcpu); 1228 break; 1229 case IRQ_PEND_EXT_EXTERNAL: 1230 rc = __deliver_external_call(vcpu); 1231 break; 1232 case IRQ_PEND_EXT_CLOCK_COMP: 1233 rc = __deliver_ckc(vcpu); 1234 break; 1235 case IRQ_PEND_EXT_CPU_TIMER: 1236 rc = __deliver_cpu_timer(vcpu); 1237 break; 1238 case IRQ_PEND_RESTART: 1239 rc = __deliver_restart(vcpu); 1240 break; 1241 case IRQ_PEND_SET_PREFIX: 1242 rc = __deliver_set_prefix(vcpu); 1243 break; 1244 case IRQ_PEND_PFAULT_INIT: 1245 rc = __deliver_pfault_init(vcpu); 1246 break; 1247 case IRQ_PEND_EXT_SERVICE: 1248 rc = __deliver_service(vcpu); 1249 break; 1250 case IRQ_PEND_PFAULT_DONE: 1251 rc = __deliver_pfault_done(vcpu); 1252 break; 1253 case IRQ_PEND_VIRTIO: 1254 rc = __deliver_virtio(vcpu); 1255 break; 1256 default: 1257 WARN_ONCE(1, "Unknown pending irq type %ld", irq_type); 1258 clear_bit(irq_type, &li->pending_irqs); 1259 } 1260 } 1261 1262 set_intercept_indicators(vcpu); 1263 1264 return rc; 1265 } 1266 1267 static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) 1268 { 1269 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1270 1271 VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code); 1272 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, 1273 irq->u.pgm.code, 0); 1274 1275 if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) { 1276 /* auto detection if no valid ILC was given */ 1277 irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK; 1278 irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu); 1279 irq->u.pgm.flags |= 
KVM_S390_PGM_FLAGS_ILC_VALID; 1280 } 1281 1282 if (irq->u.pgm.code == PGM_PER) { 1283 li->irq.pgm.code |= PGM_PER; 1284 li->irq.pgm.flags = irq->u.pgm.flags; 1285 /* only modify PER related information */ 1286 li->irq.pgm.per_address = irq->u.pgm.per_address; 1287 li->irq.pgm.per_code = irq->u.pgm.per_code; 1288 li->irq.pgm.per_atmid = irq->u.pgm.per_atmid; 1289 li->irq.pgm.per_access_id = irq->u.pgm.per_access_id; 1290 } else if (!(irq->u.pgm.code & PGM_PER)) { 1291 li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) | 1292 irq->u.pgm.code; 1293 li->irq.pgm.flags = irq->u.pgm.flags; 1294 /* only modify non-PER information */ 1295 li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code; 1296 li->irq.pgm.mon_code = irq->u.pgm.mon_code; 1297 li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code; 1298 li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr; 1299 li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id; 1300 li->irq.pgm.op_access_id = irq->u.pgm.op_access_id; 1301 } else { 1302 li->irq.pgm = irq->u.pgm; 1303 } 1304 set_bit(IRQ_PEND_PROG, &li->pending_irqs); 1305 return 0; 1306 } 1307 1308 static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) 1309 { 1310 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1311 1312 VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx", 1313 irq->u.ext.ext_params2); 1314 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT, 1315 irq->u.ext.ext_params, 1316 irq->u.ext.ext_params2); 1317 1318 li->irq.ext = irq->u.ext; 1319 set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs); 1320 kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT); 1321 return 0; 1322 } 1323 1324 static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) 1325 { 1326 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1327 struct kvm_s390_extcall_info *extcall = &li->irq.extcall; 1328 uint16_t src_id = irq->u.extcall.code; 1329 1330 VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u", 1331 src_id); 1332 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL, 1333 src_id, 0); 1334 1335 /* sending vcpu invalid */ 1336 if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL) 1337 return -EINVAL; 1338 1339 if (sclp.has_sigpif) 1340 return sca_inject_ext_call(vcpu, src_id); 1341 1342 if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs)) 1343 return -EBUSY; 1344 *extcall = irq->u.extcall; 1345 kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT); 1346 return 0; 1347 } 1348 1349 static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) 1350 { 1351 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1352 struct kvm_s390_prefix_info *prefix = &li->irq.prefix; 1353 1354 VCPU_EVENT(vcpu, 3, "inject: set prefix to %x", 1355 irq->u.prefix.address); 1356 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX, 1357 irq->u.prefix.address, 0); 1358 1359 if (!is_vcpu_stopped(vcpu)) 1360 return -EBUSY; 1361 1362 *prefix = irq->u.prefix; 1363 set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs); 1364 return 0; 1365 } 1366 1367 #define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS) 1368 static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) 1369 { 1370 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1371 struct kvm_s390_stop_info *stop = &li->irq.stop; 1372 int rc = 0; 1373 1374 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0); 1375 1376 if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS) 1377 return -EINVAL; 1378 1379 
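	/*
	 * If the VCPU is already stopped, there is nothing left to
	 * inject; only an optionally requested store status is done.
	 */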
if (is_vcpu_stopped(vcpu)) { 1380 if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS) 1381 rc = kvm_s390_store_status_unloaded(vcpu, 1382 KVM_S390_STORE_STATUS_NOADDR); 1383 return rc; 1384 } 1385 1386 if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs)) 1387 return -EBUSY; 1388 stop->flags = irq->u.stop.flags; 1389 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT); 1390 return 0; 1391 } 1392 1393 static int __inject_sigp_restart(struct kvm_vcpu *vcpu, 1394 struct kvm_s390_irq *irq) 1395 { 1396 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1397 1398 VCPU_EVENT(vcpu, 3, "%s", "inject: restart int"); 1399 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0); 1400 1401 set_bit(IRQ_PEND_RESTART, &li->pending_irqs); 1402 return 0; 1403 } 1404 1405 static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, 1406 struct kvm_s390_irq *irq) 1407 { 1408 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1409 1410 VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u", 1411 irq->u.emerg.code); 1412 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, 1413 irq->u.emerg.code, 0); 1414 1415 /* sending vcpu invalid */ 1416 if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL) 1417 return -EINVAL; 1418 1419 set_bit(irq->u.emerg.code, li->sigp_emerg_pending); 1420 set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); 1421 kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT); 1422 return 0; 1423 } 1424 1425 static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) 1426 { 1427 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1428 struct kvm_s390_mchk_info *mchk = &li->irq.mchk; 1429 1430 VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx", 1431 irq->u.mchk.mcic); 1432 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0, 1433 irq->u.mchk.mcic); 1434 1435 /* 1436 * Because repressible machine checks can be indicated along with 1437 * exigent machine checks (PoP, Chapter 11, Interruption action) 1438 * we need to combine cr14, mcic and external damage code. 
1439 * Failing storage address and the logout area should not be or'ed 1440 * together, we just indicate the last occurrence of the corresponding 1441 * machine check 1442 */ 1443 mchk->cr14 |= irq->u.mchk.cr14; 1444 mchk->mcic |= irq->u.mchk.mcic; 1445 mchk->ext_damage_code |= irq->u.mchk.ext_damage_code; 1446 mchk->failing_storage_address = irq->u.mchk.failing_storage_address; 1447 memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout, 1448 sizeof(mchk->fixed_logout)); 1449 if (mchk->mcic & MCHK_EX_MASK) 1450 set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs); 1451 else if (mchk->mcic & MCHK_REP_MASK) 1452 set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs); 1453 return 0; 1454 } 1455 1456 static int __inject_ckc(struct kvm_vcpu *vcpu) 1457 { 1458 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1459 1460 VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external"); 1461 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP, 1462 0, 0); 1463 1464 set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); 1465 kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT); 1466 return 0; 1467 } 1468 1469 static int __inject_cpu_timer(struct kvm_vcpu *vcpu) 1470 { 1471 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1472 1473 VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external"); 1474 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER, 1475 0, 0); 1476 1477 set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); 1478 kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT); 1479 return 0; 1480 } 1481 1482 static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm, 1483 int isc, u32 schid) 1484 { 1485 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; 1486 struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc]; 1487 struct kvm_s390_interrupt_info *iter; 1488 u16 id = (schid & 0xffff0000U) >> 16; 1489 u16 nr = schid & 0x0000ffffU; 1490 1491 spin_lock(&fi->lock); 1492 list_for_each_entry(iter, isc_list, list) { 1493 if (schid && (id != iter->io.subchannel_id || 1494 nr != iter->io.subchannel_nr)) 1495 continue; 1496 /* found an appropriate entry */ 1497 list_del_init(&iter->list); 1498 fi->counters[FIRQ_CNTR_IO] -= 1; 1499 if (list_empty(isc_list)) 1500 clear_bit(isc_to_irq_type(isc), &fi->pending_irqs); 1501 spin_unlock(&fi->lock); 1502 return iter; 1503 } 1504 spin_unlock(&fi->lock); 1505 return NULL; 1506 } 1507 1508 static struct kvm_s390_interrupt_info *get_top_io_int(struct kvm *kvm, 1509 u64 isc_mask, u32 schid) 1510 { 1511 struct kvm_s390_interrupt_info *inti = NULL; 1512 int isc; 1513 1514 for (isc = 0; isc <= MAX_ISC && !inti; isc++) { 1515 if (isc_mask & isc_to_isc_bits(isc)) 1516 inti = get_io_int(kvm, isc, schid); 1517 } 1518 return inti; 1519 } 1520 1521 static int get_top_gisa_isc(struct kvm *kvm, u64 isc_mask, u32 schid) 1522 { 1523 unsigned long active_mask; 1524 int isc; 1525 1526 if (schid) 1527 goto out; 1528 if (!kvm->arch.gisa) 1529 goto out; 1530 1531 active_mask = (isc_mask & kvm_s390_gisa_get_ipm(kvm->arch.gisa) << 24) << 32; 1532 while (active_mask) { 1533 isc = __fls(active_mask) ^ (BITS_PER_LONG - 1); 1534 if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, isc)) 1535 return isc; 1536 clear_bit_inv(isc, &active_mask); 1537 } 1538 out: 1539 return -EINVAL; 1540 } 1541 1542 /* 1543 * Dequeue and return an I/O interrupt matching any of the interruption 1544 * subclasses as designated by the isc mask in cr6 and the schid (if != 0). 1545 * Take into account the interrupts pending in the interrupt list and in GISA. 
1546 * 1547 * Note that for a guest that does not enable I/O interrupts 1548 * but relies on TPI, a flood of classic interrupts may starve 1549 * out adapter interrupts on the same isc. Linux does not do 1550 * that, and it is possible to work around the issue by configuring 1551 * different iscs for classic and adapter interrupts in the guest, 1552 * but we may want to revisit this in the future. 1553 */ 1554 struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, 1555 u64 isc_mask, u32 schid) 1556 { 1557 struct kvm_s390_interrupt_info *inti, *tmp_inti; 1558 int isc; 1559 1560 inti = get_top_io_int(kvm, isc_mask, schid); 1561 1562 isc = get_top_gisa_isc(kvm, isc_mask, schid); 1563 if (isc < 0) 1564 /* no AI in GISA */ 1565 goto out; 1566 1567 if (!inti) 1568 /* AI in GISA but no classical IO int */ 1569 goto gisa_out; 1570 1571 /* both types of interrupts present */ 1572 if (int_word_to_isc(inti->io.io_int_word) <= isc) { 1573 /* classical IO int with higher priority */ 1574 kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc); 1575 goto out; 1576 } 1577 gisa_out: 1578 tmp_inti = kzalloc(sizeof(*inti), GFP_KERNEL); 1579 if (tmp_inti) { 1580 tmp_inti->type = KVM_S390_INT_IO(1, 0, 0, 0); 1581 tmp_inti->io.io_int_word = isc_to_int_word(isc); 1582 if (inti) 1583 kvm_s390_reinject_io_int(kvm, inti); 1584 inti = tmp_inti; 1585 } else 1586 kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc); 1587 out: 1588 return inti; 1589 } 1590 1591 #define SCCB_MASK 0xFFFFFFF8 1592 #define SCCB_EVENT_PENDING 0x3 1593 1594 static int __inject_service(struct kvm *kvm, 1595 struct kvm_s390_interrupt_info *inti) 1596 { 1597 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; 1598 1599 spin_lock(&fi->lock); 1600 fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING; 1601 /* 1602 * Early versions of the QEMU s390 bios will inject several 1603 * service interrupts after another without handling a 1604 * condition code indicating busy. 1605 * We will silently ignore those superfluous sccb values. 
1606 * A future version of QEMU will take care of serialization 1607 * of servc requests 1608 */ 1609 if (fi->srv_signal.ext_params & SCCB_MASK) 1610 goto out; 1611 fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK; 1612 set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs); 1613 out: 1614 spin_unlock(&fi->lock); 1615 kfree(inti); 1616 return 0; 1617 } 1618 1619 static int __inject_virtio(struct kvm *kvm, 1620 struct kvm_s390_interrupt_info *inti) 1621 { 1622 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; 1623 1624 spin_lock(&fi->lock); 1625 if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) { 1626 spin_unlock(&fi->lock); 1627 return -EBUSY; 1628 } 1629 fi->counters[FIRQ_CNTR_VIRTIO] += 1; 1630 list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]); 1631 set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs); 1632 spin_unlock(&fi->lock); 1633 return 0; 1634 } 1635 1636 static int __inject_pfault_done(struct kvm *kvm, 1637 struct kvm_s390_interrupt_info *inti) 1638 { 1639 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; 1640 1641 spin_lock(&fi->lock); 1642 if (fi->counters[FIRQ_CNTR_PFAULT] >= 1643 (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) { 1644 spin_unlock(&fi->lock); 1645 return -EBUSY; 1646 } 1647 fi->counters[FIRQ_CNTR_PFAULT] += 1; 1648 list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]); 1649 set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs); 1650 spin_unlock(&fi->lock); 1651 return 0; 1652 } 1653 1654 #define CR_PENDING_SUBCLASS 28 1655 static int __inject_float_mchk(struct kvm *kvm, 1656 struct kvm_s390_interrupt_info *inti) 1657 { 1658 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; 1659 1660 spin_lock(&fi->lock); 1661 fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS); 1662 fi->mchk.mcic |= inti->mchk.mcic; 1663 set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs); 1664 spin_unlock(&fi->lock); 1665 kfree(inti); 1666 return 0; 1667 } 1668 1669 static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) 1670 { 1671 struct kvm_s390_float_interrupt *fi; 1672 struct list_head *list; 1673 int isc; 1674 1675 isc = int_word_to_isc(inti->io.io_int_word); 1676 1677 if (kvm->arch.gisa && inti->type & KVM_S390_INT_IO_AI_MASK) { 1678 VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc); 1679 kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc); 1680 kfree(inti); 1681 return 0; 1682 } 1683 1684 fi = &kvm->arch.float_int; 1685 spin_lock(&fi->lock); 1686 if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) { 1687 spin_unlock(&fi->lock); 1688 return -EBUSY; 1689 } 1690 fi->counters[FIRQ_CNTR_IO] += 1; 1691 1692 if (inti->type & KVM_S390_INT_IO_AI_MASK) 1693 VM_EVENT(kvm, 4, "%s", "inject: I/O (AI)"); 1694 else 1695 VM_EVENT(kvm, 4, "inject: I/O %x ss %x schid %04x", 1696 inti->io.subchannel_id >> 8, 1697 inti->io.subchannel_id >> 1 & 0x3, 1698 inti->io.subchannel_nr); 1699 list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc]; 1700 list_add_tail(&inti->list, list); 1701 set_bit(isc_to_irq_type(isc), &fi->pending_irqs); 1702 spin_unlock(&fi->lock); 1703 return 0; 1704 } 1705 1706 /* 1707 * Find a destination VCPU for a floating irq and kick it. 
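 * Idle VCPUs are preferred; otherwise the next VCPU is chosen round
 * robin, skipping VCPUs that are stopped.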
1708 */ 1709 static void __floating_irq_kick(struct kvm *kvm, u64 type) 1710 { 1711 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; 1712 struct kvm_vcpu *dst_vcpu; 1713 int sigcpu, online_vcpus, nr_tries = 0; 1714 1715 online_vcpus = atomic_read(&kvm->online_vcpus); 1716 if (!online_vcpus) 1717 return; 1718 1719 /* find idle VCPUs first, then round robin */ 1720 sigcpu = find_first_bit(fi->idle_mask, online_vcpus); 1721 if (sigcpu == online_vcpus) { 1722 do { 1723 sigcpu = fi->next_rr_cpu; 1724 fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus; 1725 /* avoid endless loops if all vcpus are stopped */ 1726 if (nr_tries++ >= online_vcpus) 1727 return; 1728 } while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu))); 1729 } 1730 dst_vcpu = kvm_get_vcpu(kvm, sigcpu); 1731 1732 /* make the VCPU drop out of the SIE, or wake it up if sleeping */ 1733 switch (type) { 1734 case KVM_S390_MCHK: 1735 kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT); 1736 break; 1737 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 1738 if (!(type & KVM_S390_INT_IO_AI_MASK && kvm->arch.gisa)) 1739 kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT); 1740 break; 1741 default: 1742 kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT); 1743 break; 1744 } 1745 kvm_s390_vcpu_wakeup(dst_vcpu); 1746 } 1747 1748 static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) 1749 { 1750 u64 type = READ_ONCE(inti->type); 1751 int rc; 1752 1753 switch (type) { 1754 case KVM_S390_MCHK: 1755 rc = __inject_float_mchk(kvm, inti); 1756 break; 1757 case KVM_S390_INT_VIRTIO: 1758 rc = __inject_virtio(kvm, inti); 1759 break; 1760 case KVM_S390_INT_SERVICE: 1761 rc = __inject_service(kvm, inti); 1762 break; 1763 case KVM_S390_INT_PFAULT_DONE: 1764 rc = __inject_pfault_done(kvm, inti); 1765 break; 1766 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 1767 rc = __inject_io(kvm, inti); 1768 break; 1769 default: 1770 rc = -EINVAL; 1771 } 1772 if (rc) 1773 return rc; 1774 1775 __floating_irq_kick(kvm, type); 1776 return 0; 1777 } 1778 1779 int kvm_s390_inject_vm(struct kvm *kvm, 1780 struct kvm_s390_interrupt *s390int) 1781 { 1782 struct kvm_s390_interrupt_info *inti; 1783 int rc; 1784 1785 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 1786 if (!inti) 1787 return -ENOMEM; 1788 1789 inti->type = s390int->type; 1790 switch (inti->type) { 1791 case KVM_S390_INT_VIRTIO: 1792 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx", 1793 s390int->parm, s390int->parm64); 1794 inti->ext.ext_params = s390int->parm; 1795 inti->ext.ext_params2 = s390int->parm64; 1796 break; 1797 case KVM_S390_INT_SERVICE: 1798 VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm); 1799 inti->ext.ext_params = s390int->parm; 1800 break; 1801 case KVM_S390_INT_PFAULT_DONE: 1802 inti->ext.ext_params2 = s390int->parm64; 1803 break; 1804 case KVM_S390_MCHK: 1805 VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx", 1806 s390int->parm64); 1807 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */ 1808 inti->mchk.mcic = s390int->parm64; 1809 break; 1810 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 1811 inti->io.subchannel_id = s390int->parm >> 16; 1812 inti->io.subchannel_nr = s390int->parm & 0x0000ffffu; 1813 inti->io.io_int_parm = s390int->parm64 >> 32; 1814 inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull; 1815 break; 1816 default: 1817 kfree(inti); 1818 return -EINVAL; 1819 } 1820 trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64, 1821 2); 1822 1823 rc = __inject_vm(kvm, inti); 1824 if (rc) 1825 kfree(inti); 1826 
return rc; 1827 } 1828 1829 int kvm_s390_reinject_io_int(struct kvm *kvm, 1830 struct kvm_s390_interrupt_info *inti) 1831 { 1832 return __inject_vm(kvm, inti); 1833 } 1834 1835 int s390int_to_s390irq(struct kvm_s390_interrupt *s390int, 1836 struct kvm_s390_irq *irq) 1837 { 1838 irq->type = s390int->type; 1839 switch (irq->type) { 1840 case KVM_S390_PROGRAM_INT: 1841 if (s390int->parm & 0xffff0000) 1842 return -EINVAL; 1843 irq->u.pgm.code = s390int->parm; 1844 break; 1845 case KVM_S390_SIGP_SET_PREFIX: 1846 irq->u.prefix.address = s390int->parm; 1847 break; 1848 case KVM_S390_SIGP_STOP: 1849 irq->u.stop.flags = s390int->parm; 1850 break; 1851 case KVM_S390_INT_EXTERNAL_CALL: 1852 if (s390int->parm & 0xffff0000) 1853 return -EINVAL; 1854 irq->u.extcall.code = s390int->parm; 1855 break; 1856 case KVM_S390_INT_EMERGENCY: 1857 if (s390int->parm & 0xffff0000) 1858 return -EINVAL; 1859 irq->u.emerg.code = s390int->parm; 1860 break; 1861 case KVM_S390_MCHK: 1862 irq->u.mchk.mcic = s390int->parm64; 1863 break; 1864 } 1865 return 0; 1866 } 1867 1868 int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu) 1869 { 1870 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1871 1872 return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs); 1873 } 1874 1875 void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu) 1876 { 1877 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1878 1879 spin_lock(&li->lock); 1880 li->irq.stop.flags = 0; 1881 clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs); 1882 spin_unlock(&li->lock); 1883 } 1884 1885 static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) 1886 { 1887 int rc; 1888 1889 switch (irq->type) { 1890 case KVM_S390_PROGRAM_INT: 1891 rc = __inject_prog(vcpu, irq); 1892 break; 1893 case KVM_S390_SIGP_SET_PREFIX: 1894 rc = __inject_set_prefix(vcpu, irq); 1895 break; 1896 case KVM_S390_SIGP_STOP: 1897 rc = __inject_sigp_stop(vcpu, irq); 1898 break; 1899 case KVM_S390_RESTART: 1900 rc = __inject_sigp_restart(vcpu, irq); 1901 break; 1902 case KVM_S390_INT_CLOCK_COMP: 1903 rc = __inject_ckc(vcpu); 1904 break; 1905 case KVM_S390_INT_CPU_TIMER: 1906 rc = __inject_cpu_timer(vcpu); 1907 break; 1908 case KVM_S390_INT_EXTERNAL_CALL: 1909 rc = __inject_extcall(vcpu, irq); 1910 break; 1911 case KVM_S390_INT_EMERGENCY: 1912 rc = __inject_sigp_emergency(vcpu, irq); 1913 break; 1914 case KVM_S390_MCHK: 1915 rc = __inject_mchk(vcpu, irq); 1916 break; 1917 case KVM_S390_INT_PFAULT_INIT: 1918 rc = __inject_pfault_init(vcpu, irq); 1919 break; 1920 case KVM_S390_INT_VIRTIO: 1921 case KVM_S390_INT_SERVICE: 1922 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 1923 default: 1924 rc = -EINVAL; 1925 } 1926 1927 return rc; 1928 } 1929 1930 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) 1931 { 1932 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 1933 int rc; 1934 1935 spin_lock(&li->lock); 1936 rc = do_inject_vcpu(vcpu, irq); 1937 spin_unlock(&li->lock); 1938 if (!rc) 1939 kvm_s390_vcpu_wakeup(vcpu); 1940 return rc; 1941 } 1942 1943 static inline void clear_irq_list(struct list_head *_list) 1944 { 1945 struct kvm_s390_interrupt_info *inti, *n; 1946 1947 list_for_each_entry_safe(inti, n, _list, list) { 1948 list_del(&inti->list); 1949 kfree(inti); 1950 } 1951 } 1952 1953 static void inti_to_irq(struct kvm_s390_interrupt_info *inti, 1954 struct kvm_s390_irq *irq) 1955 { 1956 irq->type = inti->type; 1957 switch (inti->type) { 1958 case KVM_S390_INT_PFAULT_INIT: 1959 case KVM_S390_INT_PFAULT_DONE: 1960 case 
KVM_S390_INT_VIRTIO:
		irq->u.ext = inti->ext;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		irq->u.io = inti->io;
		break;
	}
}

void kvm_s390_clear_float_irqs(struct kvm *kvm)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	int i;

	spin_lock(&fi->lock);
	fi->pending_irqs = 0;
	memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
	memset(&fi->mchk, 0, sizeof(fi->mchk));
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		clear_irq_list(&fi->lists[i]);
	for (i = 0; i < FIRQ_MAX_COUNT; i++)
		fi->counters[i] = 0;
	spin_unlock(&fi->lock);
	kvm_s390_gisa_clear(kvm);
}

static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
{
	struct kvm_s390_interrupt_info *inti;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_irq *buf;
	struct kvm_s390_irq *irq;
	int max_irqs;
	int ret = 0;
	int n = 0;
	int i;

	if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
		return -EINVAL;

	/*
	 * We are already using -ENOMEM to signal
	 * userspace it may retry with a bigger buffer,
	 * so we need to use something else for this case
	 */
	buf = vzalloc(len);
	if (!buf)
		return -ENOBUFS;

	max_irqs = len / sizeof(struct kvm_s390_irq);

	if (kvm->arch.gisa &&
	    kvm_s390_gisa_get_ipm(kvm->arch.gisa)) {
		for (i = 0; i <= MAX_ISC; i++) {
			if (n == max_irqs) {
				/* signal userspace to try again */
				ret = -ENOMEM;
				goto out_nolock;
			}
			if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, i)) {
				irq = (struct kvm_s390_irq *) &buf[n];
				irq->type = KVM_S390_INT_IO(1, 0, 0, 0);
				irq->u.io.io_int_word = isc_to_int_word(i);
				n++;
			}
		}
	}
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++) {
		list_for_each_entry(inti, &fi->lists[i], list) {
			if (n == max_irqs) {
				/* signal userspace to try again */
				ret = -ENOMEM;
				goto out;
			}
			inti_to_irq(inti, &buf[n]);
			n++;
		}
	}
	if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_INT_SERVICE;
		irq->u.ext = fi->srv_signal;
		n++;
	}
	if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		if (n == max_irqs) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			goto out;
		}
		irq = (struct kvm_s390_irq *) &buf[n];
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = fi->mchk;
		n++;
	}

out:
	spin_unlock(&fi->lock);
out_nolock:
	if (!ret && n > 0) {
		if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
			ret = -EFAULT;
	}
	vfree(buf);

	return ret < 0 ?
ret : n; 2073 } 2074 2075 static int flic_ais_mode_get_all(struct kvm *kvm, struct kvm_device_attr *attr) 2076 { 2077 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; 2078 struct kvm_s390_ais_all ais; 2079 2080 if (attr->attr < sizeof(ais)) 2081 return -EINVAL; 2082 2083 if (!test_kvm_facility(kvm, 72)) 2084 return -ENOTSUPP; 2085 2086 mutex_lock(&fi->ais_lock); 2087 ais.simm = fi->simm; 2088 ais.nimm = fi->nimm; 2089 mutex_unlock(&fi->ais_lock); 2090 2091 if (copy_to_user((void __user *)attr->addr, &ais, sizeof(ais))) 2092 return -EFAULT; 2093 2094 return 0; 2095 } 2096 2097 static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) 2098 { 2099 int r; 2100 2101 switch (attr->group) { 2102 case KVM_DEV_FLIC_GET_ALL_IRQS: 2103 r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr, 2104 attr->attr); 2105 break; 2106 case KVM_DEV_FLIC_AISM_ALL: 2107 r = flic_ais_mode_get_all(dev->kvm, attr); 2108 break; 2109 default: 2110 r = -EINVAL; 2111 } 2112 2113 return r; 2114 } 2115 2116 static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti, 2117 u64 addr) 2118 { 2119 struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr; 2120 void *target = NULL; 2121 void __user *source; 2122 u64 size; 2123 2124 if (get_user(inti->type, (u64 __user *)addr)) 2125 return -EFAULT; 2126 2127 switch (inti->type) { 2128 case KVM_S390_INT_PFAULT_INIT: 2129 case KVM_S390_INT_PFAULT_DONE: 2130 case KVM_S390_INT_VIRTIO: 2131 case KVM_S390_INT_SERVICE: 2132 target = (void *) &inti->ext; 2133 source = &uptr->u.ext; 2134 size = sizeof(inti->ext); 2135 break; 2136 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 2137 target = (void *) &inti->io; 2138 source = &uptr->u.io; 2139 size = sizeof(inti->io); 2140 break; 2141 case KVM_S390_MCHK: 2142 target = (void *) &inti->mchk; 2143 source = &uptr->u.mchk; 2144 size = sizeof(inti->mchk); 2145 break; 2146 default: 2147 return -EINVAL; 2148 } 2149 2150 if (copy_from_user(target, source, size)) 2151 return -EFAULT; 2152 2153 return 0; 2154 } 2155 2156 static int enqueue_floating_irq(struct kvm_device *dev, 2157 struct kvm_device_attr *attr) 2158 { 2159 struct kvm_s390_interrupt_info *inti = NULL; 2160 int r = 0; 2161 int len = attr->attr; 2162 2163 if (len % sizeof(struct kvm_s390_irq) != 0) 2164 return -EINVAL; 2165 else if (len > KVM_S390_FLIC_MAX_BUFFER) 2166 return -EINVAL; 2167 2168 while (len >= sizeof(struct kvm_s390_irq)) { 2169 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 2170 if (!inti) 2171 return -ENOMEM; 2172 2173 r = copy_irq_from_user(inti, attr->addr); 2174 if (r) { 2175 kfree(inti); 2176 return r; 2177 } 2178 r = __inject_vm(dev->kvm, inti); 2179 if (r) { 2180 kfree(inti); 2181 return r; 2182 } 2183 len -= sizeof(struct kvm_s390_irq); 2184 attr->addr += sizeof(struct kvm_s390_irq); 2185 } 2186 2187 return r; 2188 } 2189 2190 static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id) 2191 { 2192 if (id >= MAX_S390_IO_ADAPTERS) 2193 return NULL; 2194 return kvm->arch.adapters[id]; 2195 } 2196 2197 static int register_io_adapter(struct kvm_device *dev, 2198 struct kvm_device_attr *attr) 2199 { 2200 struct s390_io_adapter *adapter; 2201 struct kvm_s390_io_adapter adapter_info; 2202 2203 if (copy_from_user(&adapter_info, 2204 (void __user *)attr->addr, sizeof(adapter_info))) 2205 return -EFAULT; 2206 2207 if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) || 2208 (dev->kvm->arch.adapters[adapter_info.id] != NULL)) 2209 return -EINVAL; 2210 2211 adapter = kzalloc(sizeof(*adapter), 
GFP_KERNEL); 2212 if (!adapter) 2213 return -ENOMEM; 2214 2215 INIT_LIST_HEAD(&adapter->maps); 2216 init_rwsem(&adapter->maps_lock); 2217 atomic_set(&adapter->nr_maps, 0); 2218 adapter->id = adapter_info.id; 2219 adapter->isc = adapter_info.isc; 2220 adapter->maskable = adapter_info.maskable; 2221 adapter->masked = false; 2222 adapter->swap = adapter_info.swap; 2223 adapter->suppressible = (adapter_info.flags) & 2224 KVM_S390_ADAPTER_SUPPRESSIBLE; 2225 dev->kvm->arch.adapters[adapter->id] = adapter; 2226 2227 return 0; 2228 } 2229 2230 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked) 2231 { 2232 int ret; 2233 struct s390_io_adapter *adapter = get_io_adapter(kvm, id); 2234 2235 if (!adapter || !adapter->maskable) 2236 return -EINVAL; 2237 ret = adapter->masked; 2238 adapter->masked = masked; 2239 return ret; 2240 } 2241 2242 static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr) 2243 { 2244 struct s390_io_adapter *adapter = get_io_adapter(kvm, id); 2245 struct s390_map_info *map; 2246 int ret; 2247 2248 if (!adapter || !addr) 2249 return -EINVAL; 2250 2251 map = kzalloc(sizeof(*map), GFP_KERNEL); 2252 if (!map) { 2253 ret = -ENOMEM; 2254 goto out; 2255 } 2256 INIT_LIST_HEAD(&map->list); 2257 map->guest_addr = addr; 2258 map->addr = gmap_translate(kvm->arch.gmap, addr); 2259 if (map->addr == -EFAULT) { 2260 ret = -EFAULT; 2261 goto out; 2262 } 2263 ret = get_user_pages_fast(map->addr, 1, 1, &map->page); 2264 if (ret < 0) 2265 goto out; 2266 BUG_ON(ret != 1); 2267 down_write(&adapter->maps_lock); 2268 if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) { 2269 list_add_tail(&map->list, &adapter->maps); 2270 ret = 0; 2271 } else { 2272 put_page(map->page); 2273 ret = -EINVAL; 2274 } 2275 up_write(&adapter->maps_lock); 2276 out: 2277 if (ret) 2278 kfree(map); 2279 return ret; 2280 } 2281 2282 static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr) 2283 { 2284 struct s390_io_adapter *adapter = get_io_adapter(kvm, id); 2285 struct s390_map_info *map, *tmp; 2286 int found = 0; 2287 2288 if (!adapter || !addr) 2289 return -EINVAL; 2290 2291 down_write(&adapter->maps_lock); 2292 list_for_each_entry_safe(map, tmp, &adapter->maps, list) { 2293 if (map->guest_addr == addr) { 2294 found = 1; 2295 atomic_dec(&adapter->nr_maps); 2296 list_del(&map->list); 2297 put_page(map->page); 2298 kfree(map); 2299 break; 2300 } 2301 } 2302 up_write(&adapter->maps_lock); 2303 2304 return found ? 
0 : -EINVAL; 2305 } 2306 2307 void kvm_s390_destroy_adapters(struct kvm *kvm) 2308 { 2309 int i; 2310 struct s390_map_info *map, *tmp; 2311 2312 for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) { 2313 if (!kvm->arch.adapters[i]) 2314 continue; 2315 list_for_each_entry_safe(map, tmp, 2316 &kvm->arch.adapters[i]->maps, list) { 2317 list_del(&map->list); 2318 put_page(map->page); 2319 kfree(map); 2320 } 2321 kfree(kvm->arch.adapters[i]); 2322 } 2323 } 2324 2325 static int modify_io_adapter(struct kvm_device *dev, 2326 struct kvm_device_attr *attr) 2327 { 2328 struct kvm_s390_io_adapter_req req; 2329 struct s390_io_adapter *adapter; 2330 int ret; 2331 2332 if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req))) 2333 return -EFAULT; 2334 2335 adapter = get_io_adapter(dev->kvm, req.id); 2336 if (!adapter) 2337 return -EINVAL; 2338 switch (req.type) { 2339 case KVM_S390_IO_ADAPTER_MASK: 2340 ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask); 2341 if (ret > 0) 2342 ret = 0; 2343 break; 2344 case KVM_S390_IO_ADAPTER_MAP: 2345 ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr); 2346 break; 2347 case KVM_S390_IO_ADAPTER_UNMAP: 2348 ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr); 2349 break; 2350 default: 2351 ret = -EINVAL; 2352 } 2353 2354 return ret; 2355 } 2356 2357 static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr) 2358 2359 { 2360 const u64 isc_mask = 0xffUL << 24; /* all iscs set */ 2361 u32 schid; 2362 2363 if (attr->flags) 2364 return -EINVAL; 2365 if (attr->attr != sizeof(schid)) 2366 return -EINVAL; 2367 if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid))) 2368 return -EFAULT; 2369 if (!schid) 2370 return -EINVAL; 2371 kfree(kvm_s390_get_io_int(kvm, isc_mask, schid)); 2372 /* 2373 * If userspace is conforming to the architecture, we can have at most 2374 * one pending I/O interrupt per subchannel, so this is effectively a 2375 * clear all. 2376 */ 2377 return 0; 2378 } 2379 2380 static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr) 2381 { 2382 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; 2383 struct kvm_s390_ais_req req; 2384 int ret = 0; 2385 2386 if (!test_kvm_facility(kvm, 72)) 2387 return -ENOTSUPP; 2388 2389 if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req))) 2390 return -EFAULT; 2391 2392 if (req.isc > MAX_ISC) 2393 return -EINVAL; 2394 2395 trace_kvm_s390_modify_ais_mode(req.isc, 2396 (fi->simm & AIS_MODE_MASK(req.isc)) ? 2397 (fi->nimm & AIS_MODE_MASK(req.isc)) ? 
2398 2 : KVM_S390_AIS_MODE_SINGLE : 2399 KVM_S390_AIS_MODE_ALL, req.mode); 2400 2401 mutex_lock(&fi->ais_lock); 2402 switch (req.mode) { 2403 case KVM_S390_AIS_MODE_ALL: 2404 fi->simm &= ~AIS_MODE_MASK(req.isc); 2405 fi->nimm &= ~AIS_MODE_MASK(req.isc); 2406 break; 2407 case KVM_S390_AIS_MODE_SINGLE: 2408 fi->simm |= AIS_MODE_MASK(req.isc); 2409 fi->nimm &= ~AIS_MODE_MASK(req.isc); 2410 break; 2411 default: 2412 ret = -EINVAL; 2413 } 2414 mutex_unlock(&fi->ais_lock); 2415 2416 return ret; 2417 } 2418 2419 static int kvm_s390_inject_airq(struct kvm *kvm, 2420 struct s390_io_adapter *adapter) 2421 { 2422 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; 2423 struct kvm_s390_interrupt s390int = { 2424 .type = KVM_S390_INT_IO(1, 0, 0, 0), 2425 .parm = 0, 2426 .parm64 = isc_to_int_word(adapter->isc), 2427 }; 2428 int ret = 0; 2429 2430 if (!test_kvm_facility(kvm, 72) || !adapter->suppressible) 2431 return kvm_s390_inject_vm(kvm, &s390int); 2432 2433 mutex_lock(&fi->ais_lock); 2434 if (fi->nimm & AIS_MODE_MASK(adapter->isc)) { 2435 trace_kvm_s390_airq_suppressed(adapter->id, adapter->isc); 2436 goto out; 2437 } 2438 2439 ret = kvm_s390_inject_vm(kvm, &s390int); 2440 if (!ret && (fi->simm & AIS_MODE_MASK(adapter->isc))) { 2441 fi->nimm |= AIS_MODE_MASK(adapter->isc); 2442 trace_kvm_s390_modify_ais_mode(adapter->isc, 2443 KVM_S390_AIS_MODE_SINGLE, 2); 2444 } 2445 out: 2446 mutex_unlock(&fi->ais_lock); 2447 return ret; 2448 } 2449 2450 static int flic_inject_airq(struct kvm *kvm, struct kvm_device_attr *attr) 2451 { 2452 unsigned int id = attr->attr; 2453 struct s390_io_adapter *adapter = get_io_adapter(kvm, id); 2454 2455 if (!adapter) 2456 return -EINVAL; 2457 2458 return kvm_s390_inject_airq(kvm, adapter); 2459 } 2460 2461 static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr) 2462 { 2463 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; 2464 struct kvm_s390_ais_all ais; 2465 2466 if (!test_kvm_facility(kvm, 72)) 2467 return -ENOTSUPP; 2468 2469 if (copy_from_user(&ais, (void __user *)attr->addr, sizeof(ais))) 2470 return -EFAULT; 2471 2472 mutex_lock(&fi->ais_lock); 2473 fi->simm = ais.simm; 2474 fi->nimm = ais.nimm; 2475 mutex_unlock(&fi->ais_lock); 2476 2477 return 0; 2478 } 2479 2480 static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) 2481 { 2482 int r = 0; 2483 unsigned int i; 2484 struct kvm_vcpu *vcpu; 2485 2486 switch (attr->group) { 2487 case KVM_DEV_FLIC_ENQUEUE: 2488 r = enqueue_floating_irq(dev, attr); 2489 break; 2490 case KVM_DEV_FLIC_CLEAR_IRQS: 2491 kvm_s390_clear_float_irqs(dev->kvm); 2492 break; 2493 case KVM_DEV_FLIC_APF_ENABLE: 2494 dev->kvm->arch.gmap->pfault_enabled = 1; 2495 break; 2496 case KVM_DEV_FLIC_APF_DISABLE_WAIT: 2497 dev->kvm->arch.gmap->pfault_enabled = 0; 2498 /* 2499 * Make sure no async faults are in transition when 2500 * clearing the queues. So we don't need to worry 2501 * about late coming workers. 
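		 * synchronize_srcu() below waits for all current kvm->srcu
		 * read-side sections to complete, so no async page fault can
		 * still be queued under the old pfault_enabled value when the
		 * per-vcpu completion queues are cleared.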
2502 */ 2503 synchronize_srcu(&dev->kvm->srcu); 2504 kvm_for_each_vcpu(i, vcpu, dev->kvm) 2505 kvm_clear_async_pf_completion_queue(vcpu); 2506 break; 2507 case KVM_DEV_FLIC_ADAPTER_REGISTER: 2508 r = register_io_adapter(dev, attr); 2509 break; 2510 case KVM_DEV_FLIC_ADAPTER_MODIFY: 2511 r = modify_io_adapter(dev, attr); 2512 break; 2513 case KVM_DEV_FLIC_CLEAR_IO_IRQ: 2514 r = clear_io_irq(dev->kvm, attr); 2515 break; 2516 case KVM_DEV_FLIC_AISM: 2517 r = modify_ais_mode(dev->kvm, attr); 2518 break; 2519 case KVM_DEV_FLIC_AIRQ_INJECT: 2520 r = flic_inject_airq(dev->kvm, attr); 2521 break; 2522 case KVM_DEV_FLIC_AISM_ALL: 2523 r = flic_ais_mode_set_all(dev->kvm, attr); 2524 break; 2525 default: 2526 r = -EINVAL; 2527 } 2528 2529 return r; 2530 } 2531 2532 static int flic_has_attr(struct kvm_device *dev, 2533 struct kvm_device_attr *attr) 2534 { 2535 switch (attr->group) { 2536 case KVM_DEV_FLIC_GET_ALL_IRQS: 2537 case KVM_DEV_FLIC_ENQUEUE: 2538 case KVM_DEV_FLIC_CLEAR_IRQS: 2539 case KVM_DEV_FLIC_APF_ENABLE: 2540 case KVM_DEV_FLIC_APF_DISABLE_WAIT: 2541 case KVM_DEV_FLIC_ADAPTER_REGISTER: 2542 case KVM_DEV_FLIC_ADAPTER_MODIFY: 2543 case KVM_DEV_FLIC_CLEAR_IO_IRQ: 2544 case KVM_DEV_FLIC_AISM: 2545 case KVM_DEV_FLIC_AIRQ_INJECT: 2546 case KVM_DEV_FLIC_AISM_ALL: 2547 return 0; 2548 } 2549 return -ENXIO; 2550 } 2551 2552 static int flic_create(struct kvm_device *dev, u32 type) 2553 { 2554 if (!dev) 2555 return -EINVAL; 2556 if (dev->kvm->arch.flic) 2557 return -EINVAL; 2558 dev->kvm->arch.flic = dev; 2559 return 0; 2560 } 2561 2562 static void flic_destroy(struct kvm_device *dev) 2563 { 2564 dev->kvm->arch.flic = NULL; 2565 kfree(dev); 2566 } 2567 2568 /* s390 floating irq controller (flic) */ 2569 struct kvm_device_ops kvm_flic_ops = { 2570 .name = "kvm-flic", 2571 .get_attr = flic_get_attr, 2572 .set_attr = flic_set_attr, 2573 .has_attr = flic_has_attr, 2574 .create = flic_create, 2575 .destroy = flic_destroy, 2576 }; 2577 2578 static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap) 2579 { 2580 unsigned long bit; 2581 2582 bit = bit_nr + (addr % PAGE_SIZE) * 8; 2583 2584 return swap ? 
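		/* swapped: mirror the bit within each long, i.e. convert MSB-0 numbering to the kernel's LSB-0 bitmaps */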
(bit ^ (BITS_PER_LONG - 1)) : bit; 2585 } 2586 2587 static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter, 2588 u64 addr) 2589 { 2590 struct s390_map_info *map; 2591 2592 if (!adapter) 2593 return NULL; 2594 2595 list_for_each_entry(map, &adapter->maps, list) { 2596 if (map->guest_addr == addr) 2597 return map; 2598 } 2599 return NULL; 2600 } 2601 2602 static int adapter_indicators_set(struct kvm *kvm, 2603 struct s390_io_adapter *adapter, 2604 struct kvm_s390_adapter_int *adapter_int) 2605 { 2606 unsigned long bit; 2607 int summary_set, idx; 2608 struct s390_map_info *info; 2609 void *map; 2610 2611 info = get_map_info(adapter, adapter_int->ind_addr); 2612 if (!info) 2613 return -1; 2614 map = page_address(info->page); 2615 bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap); 2616 set_bit(bit, map); 2617 idx = srcu_read_lock(&kvm->srcu); 2618 mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT); 2619 set_page_dirty_lock(info->page); 2620 info = get_map_info(adapter, adapter_int->summary_addr); 2621 if (!info) { 2622 srcu_read_unlock(&kvm->srcu, idx); 2623 return -1; 2624 } 2625 map = page_address(info->page); 2626 bit = get_ind_bit(info->addr, adapter_int->summary_offset, 2627 adapter->swap); 2628 summary_set = test_and_set_bit(bit, map); 2629 mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT); 2630 set_page_dirty_lock(info->page); 2631 srcu_read_unlock(&kvm->srcu, idx); 2632 return summary_set ? 0 : 1; 2633 } 2634 2635 /* 2636 * < 0 - not injected due to error 2637 * = 0 - coalesced, summary indicator already active 2638 * > 0 - injected interrupt 2639 */ 2640 static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e, 2641 struct kvm *kvm, int irq_source_id, int level, 2642 bool line_status) 2643 { 2644 int ret; 2645 struct s390_io_adapter *adapter; 2646 2647 /* We're only interested in the 0->1 transition. */ 2648 if (!level) 2649 return 0; 2650 adapter = get_io_adapter(kvm, e->adapter.adapter_id); 2651 if (!adapter) 2652 return -1; 2653 down_read(&adapter->maps_lock); 2654 ret = adapter_indicators_set(kvm, adapter, &e->adapter); 2655 up_read(&adapter->maps_lock); 2656 if ((ret > 0) && !adapter->masked) { 2657 ret = kvm_s390_inject_airq(kvm, adapter); 2658 if (ret == 0) 2659 ret = 1; 2660 } 2661 return ret; 2662 } 2663 2664 /* 2665 * Inject the machine check to the guest. 2666 */ 2667 void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu, 2668 struct mcck_volatile_info *mcck_info) 2669 { 2670 struct kvm_s390_interrupt_info inti; 2671 struct kvm_s390_irq irq; 2672 struct kvm_s390_mchk_info *mchk; 2673 union mci mci; 2674 __u64 cr14 = 0; /* upper bits are not used */ 2675 int rc; 2676 2677 mci.val = mcck_info->mcic; 2678 if (mci.sr) 2679 cr14 |= CR14_RECOVERY_SUBMASK; 2680 if (mci.dg) 2681 cr14 |= CR14_DEGRADATION_SUBMASK; 2682 if (mci.w) 2683 cr14 |= CR14_WARNING_SUBMASK; 2684 2685 mchk = mci.ck ? 
&inti.mchk : &irq.u.mchk; 2686 mchk->cr14 = cr14; 2687 mchk->mcic = mcck_info->mcic; 2688 mchk->ext_damage_code = mcck_info->ext_damage_code; 2689 mchk->failing_storage_address = mcck_info->failing_storage_address; 2690 if (mci.ck) { 2691 /* Inject the floating machine check */ 2692 inti.type = KVM_S390_MCHK; 2693 rc = __inject_vm(vcpu->kvm, &inti); 2694 } else { 2695 /* Inject the machine check to specified vcpu */ 2696 irq.type = KVM_S390_MCHK; 2697 rc = kvm_s390_inject_vcpu(vcpu, &irq); 2698 } 2699 WARN_ON_ONCE(rc); 2700 } 2701 2702 int kvm_set_routing_entry(struct kvm *kvm, 2703 struct kvm_kernel_irq_routing_entry *e, 2704 const struct kvm_irq_routing_entry *ue) 2705 { 2706 int ret; 2707 2708 switch (ue->type) { 2709 case KVM_IRQ_ROUTING_S390_ADAPTER: 2710 e->set = set_adapter_int; 2711 e->adapter.summary_addr = ue->u.adapter.summary_addr; 2712 e->adapter.ind_addr = ue->u.adapter.ind_addr; 2713 e->adapter.summary_offset = ue->u.adapter.summary_offset; 2714 e->adapter.ind_offset = ue->u.adapter.ind_offset; 2715 e->adapter.adapter_id = ue->u.adapter.adapter_id; 2716 ret = 0; 2717 break; 2718 default: 2719 ret = -EINVAL; 2720 } 2721 2722 return ret; 2723 } 2724 2725 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, 2726 int irq_source_id, int level, bool line_status) 2727 { 2728 return -EINVAL; 2729 } 2730 2731 int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len) 2732 { 2733 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 2734 struct kvm_s390_irq *buf; 2735 int r = 0; 2736 int n; 2737 2738 buf = vmalloc(len); 2739 if (!buf) 2740 return -ENOMEM; 2741 2742 if (copy_from_user((void *) buf, irqstate, len)) { 2743 r = -EFAULT; 2744 goto out_free; 2745 } 2746 2747 /* 2748 * Don't allow setting the interrupt state 2749 * when there are already interrupts pending 2750 */ 2751 spin_lock(&li->lock); 2752 if (li->pending_irqs) { 2753 r = -EBUSY; 2754 goto out_unlock; 2755 } 2756 2757 for (n = 0; n < len / sizeof(*buf); n++) { 2758 r = do_inject_vcpu(vcpu, &buf[n]); 2759 if (r) 2760 break; 2761 } 2762 2763 out_unlock: 2764 spin_unlock(&li->lock); 2765 out_free: 2766 vfree(buf); 2767 2768 return r; 2769 } 2770 2771 static void store_local_irq(struct kvm_s390_local_interrupt *li, 2772 struct kvm_s390_irq *irq, 2773 unsigned long irq_type) 2774 { 2775 switch (irq_type) { 2776 case IRQ_PEND_MCHK_EX: 2777 case IRQ_PEND_MCHK_REP: 2778 irq->type = KVM_S390_MCHK; 2779 irq->u.mchk = li->irq.mchk; 2780 break; 2781 case IRQ_PEND_PROG: 2782 irq->type = KVM_S390_PROGRAM_INT; 2783 irq->u.pgm = li->irq.pgm; 2784 break; 2785 case IRQ_PEND_PFAULT_INIT: 2786 irq->type = KVM_S390_INT_PFAULT_INIT; 2787 irq->u.ext = li->irq.ext; 2788 break; 2789 case IRQ_PEND_EXT_EXTERNAL: 2790 irq->type = KVM_S390_INT_EXTERNAL_CALL; 2791 irq->u.extcall = li->irq.extcall; 2792 break; 2793 case IRQ_PEND_EXT_CLOCK_COMP: 2794 irq->type = KVM_S390_INT_CLOCK_COMP; 2795 break; 2796 case IRQ_PEND_EXT_CPU_TIMER: 2797 irq->type = KVM_S390_INT_CPU_TIMER; 2798 break; 2799 case IRQ_PEND_SIGP_STOP: 2800 irq->type = KVM_S390_SIGP_STOP; 2801 irq->u.stop = li->irq.stop; 2802 break; 2803 case IRQ_PEND_RESTART: 2804 irq->type = KVM_S390_RESTART; 2805 break; 2806 case IRQ_PEND_SET_PREFIX: 2807 irq->type = KVM_S390_SIGP_SET_PREFIX; 2808 irq->u.prefix = li->irq.prefix; 2809 break; 2810 } 2811 } 2812 2813 int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len) 2814 { 2815 int scn; 2816 unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)]; 2817 struct 
kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	unsigned long pending_irqs;
	struct kvm_s390_irq irq;
	unsigned long irq_type;
	int cpuaddr;
	int n = 0;

	spin_lock(&li->lock);
	pending_irqs = li->pending_irqs;
	memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
	       sizeof(sigp_emerg_pending));
	spin_unlock(&li->lock);

	for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
		memset(&irq, 0, sizeof(irq));
		if (irq_type == IRQ_PEND_EXT_EMERGENCY)
			continue;
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
		for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
			memset(&irq, 0, sizeof(irq));
			if (n + sizeof(irq) > len)
				return -ENOBUFS;
			irq.type = KVM_S390_INT_EMERGENCY;
			irq.u.emerg.code = cpuaddr;
			if (copy_to_user(&buf[n], &irq, sizeof(irq)))
				return -EFAULT;
			n += sizeof(irq);
		}
	}

	if (sca_ext_call_pending(vcpu, &scn)) {
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		memset(&irq, 0, sizeof(irq));
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.u.extcall.code = scn;
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	return n;
}

void kvm_s390_gisa_clear(struct kvm *kvm)
{
	if (kvm->arch.gisa) {
		memset(kvm->arch.gisa, 0, sizeof(struct kvm_s390_gisa));
		kvm->arch.gisa->next_alert = (u32)(u64)kvm->arch.gisa;
		VM_EVENT(kvm, 3, "gisa 0x%pK cleared", kvm->arch.gisa);
	}
}

void kvm_s390_gisa_init(struct kvm *kvm)
{
	if (css_general_characteristics.aiv) {
		kvm->arch.gisa = &kvm->arch.sie_page2->gisa;
		VM_EVENT(kvm, 3, "gisa 0x%pK initialized", kvm->arch.gisa);
		kvm_s390_gisa_clear(kvm);
	}
}

void kvm_s390_gisa_destroy(struct kvm *kvm)
{
	if (!kvm->arch.gisa)
		return;
	kvm->arch.gisa = NULL;
}
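
/*
 * Illustrative userspace sketch (not part of the kernel build): one way the
 * FLIC implemented above might be driven through the KVM device API.  The
 * file descriptors vm_fd/flic_fd and the variable sccb are assumptions made
 * for this example only.
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_FLIC };
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);		// flic_fd = cd.fd
 *
 *	struct kvm_s390_irq irq = {
 *		.type = KVM_S390_INT_SERVICE,
 *		.u.ext.ext_params = sccb,
 *	};
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_ENQUEUE,
 *		.attr  = sizeof(irq),	// buffer length, see enqueue_floating_irq()
 *		.addr  = (__u64)(unsigned long)&irq,
 *	};
 *	ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * KVM_DEV_FLIC_GET_ALL_IRQS follows the convention of get_all_floating_irqs():
 * a return of -ENOMEM asks userspace to retry with a bigger buffer, otherwise
 * the call reports the number of interrupts copied out.
 */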