/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define IOINT_SCHID_MASK 0x0000ffff
#define IOINT_SSID_MASK 0x00030000
#define IOINT_CSSID_MASK 0x03fc0000
#define IOINT_AI_MASK 0x04000000

static int is_ioint(u64 type)
{
	return ((type & 0xfffe0000u) != 0xfffe0000u);
}

static int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
		return 0;
	return 1;
}

static u64 int_word_to_isc_bits(u32 int_word)
{
	u8 isc = (int_word & 0x38000000) >> 27;

	return (0x80 >> isc) << 24;
}

static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
			return 1;
		return 0;
	case KVM_S390_INT_EMERGENCY:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
			return 1;
		return 0;
	case KVM_S390_INT_SERVICE:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_SIGP_SET_PREFIX:
	case KVM_S390_RESTART:
		return 1;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
			return 1;
		return 0;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[6] &
		    int_word_to_isc_bits(inti->io.io_int_word))
			return 1;
		return 0;
	default:
		printk(KERN_WARNING "illegal interrupt type %llx\n",
		       inti->type);
		BUG();
	}
	return 0;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

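/*
 * Clear all interception requests (cpuflags, control-register load
 * controls and the LPSW intercept) before they are recomputed for the
 * interrupts that are still pending.
 */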
static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_ECALL_PEND |
		CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
		&vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~ICTL_LPSW;
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR0;
		break;
	case KVM_S390_SIGP_STOP:
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
		break;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			vcpu->arch.sie_block->ictl |= ICTL_LPSW;
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR14;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_IO_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR6;
		break;
	default:
		BUG();
	}
}

static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
				   struct kvm_s390_interrupt_info *inti)
{
	const unsigned short table[] = { 2, 4, 4, 6 };
	int rc = 0;

	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
		vcpu->stat.deliver_emergency_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->emerg.code, 0);
		rc = put_guest(vcpu, 0x1201, (u16 __user *)__LC_EXT_INT_CODE);
		rc |= put_guest(vcpu, inti->emerg.code,
				(u16 __user *)__LC_EXT_CPU_ADDR);
		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_EXT_NEW_PSW, sizeof(psw_t));
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
		vcpu->stat.deliver_external_call++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->extcall.code, 0);
		rc = put_guest(vcpu, 0x1202, (u16 __user *)__LC_EXT_INT_CODE);
		rc |= put_guest(vcpu, inti->extcall.code,
				(u16 __user *)__LC_EXT_CPU_ADDR);
		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_EXT_NEW_PSW, sizeof(psw_t));
		break;
	case KVM_S390_INT_SERVICE:
		VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
			   inti->ext.ext_params);
		vcpu->stat.deliver_service_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->ext.ext_params, 0);
		rc = put_guest(vcpu, 0x2401, (u16 __user *)__LC_EXT_INT_CODE);
		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_EXT_NEW_PSW, sizeof(psw_t));
		rc |= put_guest(vcpu, inti->ext.ext_params,
				(u32 __user *)__LC_EXT_PARAMS);
		break;
	case KVM_S390_INT_VIRTIO:
		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->ext.ext_params,
						 inti->ext.ext_params2);
		rc = put_guest(vcpu, 0x2603, (u16 __user *)__LC_EXT_INT_CODE);
		rc |= put_guest(vcpu, 0x0d00, (u16 __user *)__LC_EXT_CPU_ADDR);
		rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_EXT_NEW_PSW, sizeof(psw_t));
		rc |= put_guest(vcpu, inti->ext.ext_params,
				(u32 __user *)__LC_EXT_PARAMS);
		rc |= put_guest(vcpu, inti->ext.ext_params2,
				(u64 __user *)__LC_EXT_PARAMS2);
		break;
	case KVM_S390_SIGP_STOP:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
		vcpu->stat.deliver_stop_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 0, 0);
		__set_intercept_indicator(vcpu, inti);
		break;

	case KVM_S390_SIGP_SET_PREFIX:
		VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
			   inti->prefix.address);
		vcpu->stat.deliver_prefix_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->prefix.address, 0);
		kvm_s390_set_prefix(vcpu, inti->prefix.address);
		break;

	case KVM_S390_RESTART:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
		vcpu->stat.deliver_restart_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 0, 0);
		rc = copy_to_guest(vcpu,
				   offsetof(struct _lowcore, restart_old_psw),
				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      offsetof(struct _lowcore, restart_psw),
				      sizeof(psw_t));
		atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
		break;
	case KVM_S390_PROGRAM_INT:
		VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
			   inti->pgm.code,
			   table[vcpu->arch.sie_block->ipa >> 14]);
		vcpu->stat.deliver_program_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->pgm.code, 0);
		rc = put_guest(vcpu, inti->pgm.code, (u16 __user *)__LC_PGM_INT_CODE);
		rc |= put_guest(vcpu, table[vcpu->arch.sie_block->ipa >> 14],
				(u16 __user *)__LC_PGM_ILC);
		rc |= copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_PGM_NEW_PSW, sizeof(psw_t));
		break;

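	/*
	 * Machine checks additionally store the register state into the
	 * save areas at the prefix page before the old/new PSW swap,
	 * hence the kvm_s390_vcpu_store_status() call below.
	 */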
	case KVM_S390_MCHK:
		VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
			   inti->mchk.mcic);
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->mchk.cr14,
						 inti->mchk.mcic);
		rc = kvm_s390_vcpu_store_status(vcpu,
						KVM_S390_STORE_STATUS_PREFIXED);
		rc |= put_guest(vcpu, inti->mchk.mcic, (u64 __user *) __LC_MCCK_CODE);
		rc |= copy_to_guest(vcpu, __LC_MCK_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_MCK_NEW_PSW, sizeof(psw_t));
		break;

	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	{
		__u32 param0 = ((__u32)inti->io.subchannel_id << 16) |
			inti->io.subchannel_nr;
		__u64 param1 = ((__u64)inti->io.io_int_parm << 32) |
			inti->io.io_int_word;
		VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
		vcpu->stat.deliver_io_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 param0, param1);
		rc = put_guest(vcpu, inti->io.subchannel_id,
			       (u16 __user *) __LC_SUBCHANNEL_ID);
		rc |= put_guest(vcpu, inti->io.subchannel_nr,
				(u16 __user *) __LC_SUBCHANNEL_NR);
		rc |= put_guest(vcpu, inti->io.io_int_parm,
				(u32 __user *) __LC_IO_INT_PARM);
		rc |= put_guest(vcpu, inti->io.io_int_word,
				(u32 __user *) __LC_IO_INT_WORD);
		rc |= copy_to_guest(vcpu, __LC_IO_OLD_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				      __LC_IO_NEW_PSW, sizeof(psw_t));
		break;
	}
	default:
		BUG();
	}
	if (rc) {
		printk("kvm: The guest lowcore is not mapped during interrupt "
		       "delivery, killing userspace\n");
		do_exit(SIGKILL);
	}
}

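/*
 * Deliver a clock-comparator external interrupt (code 0x1004) directly,
 * without an interrupt-info structure.  Returns 1 if the interrupt was
 * delivered, 0 if the guest is not enabled for it.
 */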
static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
	int rc;

	if (psw_extint_disabled(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	rc = put_guest(vcpu, 0x1004, (u16 __user *)__LC_EXT_INT_CODE);
	rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			      __LC_EXT_NEW_PSW, sizeof(psw_t));
	if (rc) {
		printk("kvm: The guest lowcore is not mapped during interrupt "
		       "delivery, killing userspace\n");
		do_exit(SIGKILL);
	}
	return 1;
}

static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	if (atomic_read(&li->active)) {
		spin_lock_bh(&li->lock);
		list_for_each_entry(inti, &li->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock_bh(&li->lock);
	}

	if ((!rc) && atomic_read(&fi->active)) {
		spin_lock(&fi->lock);
		list_for_each_entry(inti, &fi->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock(&fi->lock);
	}

	if ((!rc) && (vcpu->arch.sie_block->ckc <
		get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) {
		if ((!psw_extint_disabled(vcpu)) &&
		    (vcpu->arch.sie_block->gcr[0] & 0x800ul))
			rc = 1;
	}

	return rc;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;
	DECLARE_WAITQUEUE(wait, current);

	vcpu->stat.exit_wait_state++;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;

	__set_cpu_idle(vcpu);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 0;
	spin_unlock_bh(&vcpu->arch.local_int.lock);

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		__unset_cpu_idle(vcpu);
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (psw_extint_disabled(vcpu) ||
	    (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		goto no_timer;
	}

	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
	if (vcpu->arch.sie_block->ckc < now) {
		__unset_cpu_idle(vcpu);
		return 0;
	}

	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	spin_lock(&vcpu->arch.local_int.float_int->lock);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	add_wait_queue(&vcpu->wq, &wait);
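	/*
	 * Sleep until a local or floating interrupt arrives, the
	 * clock-comparator tasklet raises timer_due, or a signal is
	 * pending.  Both interrupt locks are dropped around schedule()
	 * and retaken before the conditions are rechecked.
	 */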
	while (list_empty(&vcpu->arch.local_int.list) &&
		list_empty(&vcpu->arch.local_int.float_int->list) &&
		(!vcpu->arch.local_int.timer_due) &&
		!signal_pending(current)) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&vcpu->arch.local_int.lock);
		spin_unlock(&vcpu->arch.local_int.float_int->lock);
		schedule();
		spin_lock(&vcpu->arch.local_int.float_int->lock);
		spin_lock_bh(&vcpu->arch.local_int.lock);
	}
	__unset_cpu_idle(vcpu);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->wq, &wait);
	spin_unlock_bh(&vcpu->arch.local_int.lock);
	spin_unlock(&vcpu->arch.local_int.float_int->lock);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
	return 0;
}

void kvm_s390_tasklet(unsigned long parm)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;

	spin_lock(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 1;
	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);
	spin_unlock(&vcpu->arch.local_int.lock);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *n, *inti = NULL;
	int deliver;

	__reset_intercept_indicators(vcpu);
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock_bh(&li->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}

	if ((vcpu->arch.sie_block->ckc <
		get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
		__try_deliver_ckc_interrupt(vcpu);

	if (atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock(&fi->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}
}

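/*
 * Same walk as kvm_s390_deliver_pending_interrupts(), but only machine
 * check interrupts are actually delivered; every other pending
 * interrupt merely sets its intercept indicator.
 */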
void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *n, *inti = NULL;
	int deliver;

	__reset_intercept_indicators(vcpu);
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if ((inti->type == KVM_S390_MCHK) &&
				    __interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock_bh(&li->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}

	if (atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if ((inti->type == KVM_S390_MCHK) &&
				    __interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock(&fi->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}
}

int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_PROGRAM_INT;
	inti->pgm.code = code;

	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
	spin_lock_bh(&li->lock);
	list_add(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	BUG_ON(waitqueue_active(li->wq));
	spin_unlock_bh(&li->lock);
	return 0;
}

struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 cr6, u64 schid)
{
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti, *iter;

	if ((!schid && !cr6) || (schid && cr6))
		return NULL;
	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	inti = NULL;
	list_for_each_entry(iter, &fi->list, list) {
		if (!is_ioint(iter->type))
			continue;
		if (cr6 &&
		    ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
			continue;
		if (schid) {
			if (((schid & 0x00000000ffff0000) >> 16) !=
			    iter->io.subchannel_id)
				continue;
			if ((schid & 0x000000000000ffff) !=
			    iter->io.subchannel_nr)
				continue;
		}
		inti = iter;
		break;
	}
	if (inti)
		list_del_init(&inti->list);
	if (list_empty(&fi->list))
		atomic_set(&fi->active, 0);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return inti;
}

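/*
 * Inject a floating (machine-wide) interrupt.  I/O interrupts are kept
 * sorted by interruption subclass; an idle vcpu is woken to deliver the
 * interrupt, falling back to round-robin selection when no vcpu is
 * idle.
 */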
int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti, *iter;
	int sigcpu;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->type = s390int->type;
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
		inti->type = s390int->type;
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
		kfree(inti);
		return -EINVAL;
	case KVM_S390_MCHK:
		VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
			 s390int->parm64);
		inti->type = s390int->type;
		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (s390int->type & IOINT_AI_MASK)
			VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
		else
			VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
				 s390int->type & IOINT_CSSID_MASK,
				 s390int->type & IOINT_SSID_MASK,
				 s390int->type & IOINT_SCHID_MASK);
		inti->type = s390int->type;
		inti->io.subchannel_id = s390int->parm >> 16;
		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
		inti->io.io_int_parm = s390int->parm64 >> 32;
		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
		break;
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (!is_ioint(inti->type))
		list_add_tail(&inti->list, &fi->list);
	else {
		u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);

		/* Keep I/O interrupts sorted in isc order. */
		list_for_each_entry(iter, &fi->list, list) {
			if (!is_ioint(iter->type))
				continue;
			if (int_word_to_isc_bits(iter->io.io_int_word)
			    <= isc_bits)
				continue;
			break;
		}
		list_add_tail(&inti->list, &iter->list);
	}
	atomic_set(&fi->active, 1);
	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
	if (sigcpu == KVM_MAX_VCPUS) {
		do {
			sigcpu = fi->next_rr_cpu++;
			if (sigcpu == KVM_MAX_VCPUS)
				sigcpu = fi->next_rr_cpu = 0;
		} while (fi->local_int[sigcpu] == NULL);
	}
	li = fi->local_int[sigcpu];
	spin_lock_bh(&li->lock);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	spin_unlock_bh(&li->lock);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return 0;
}

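/*
 * Inject an interrupt targeted at a specific vcpu.  Program interrupts
 * are queued at the head of the local list so they are delivered ahead
 * of any pending asynchronous interrupts; all other types are queued at
 * the tail.
 */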
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
			 struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		inti->type = s390int->type;
		inti->pgm.code = s390int->parm;
		VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		inti->prefix.address = s390int->parm;
		inti->type = s390int->type;
		VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_STOP:
	case KVM_S390_RESTART:
		VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
		inti->type = s390int->type;
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
			   s390int->parm);
		inti->type = s390int->type;
		inti->extcall.code = s390int->parm;
		break;
	case KVM_S390_INT_EMERGENCY:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		VCPU_EVENT(vcpu, 3, "inject: emergency %u", s390int->parm);
		inti->type = s390int->type;
		inti->emerg.code = s390int->parm;
		break;
	case KVM_S390_MCHK:
		VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
			   s390int->parm64);
		inti->type = s390int->type;
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type, s390int->parm,
				   s390int->parm64, 2);

	mutex_lock(&vcpu->kvm->lock);
	li = &vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	if (inti->type == KVM_S390_PROGRAM_INT)
		list_add(&inti->list, &li->list);
	else
		list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (inti->type == KVM_S390_SIGP_STOP)
		li->action_bits |= ACTION_STOP_ON_STOP;
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);
	spin_unlock_bh(&li->lock);
	mutex_unlock(&vcpu->kvm->lock);
	return 0;
}