/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

static int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
		return 0;
	return 1;
}

static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
			return 1;
		/*
		 * No fall-through: an external call is only deliverable
		 * when its own subclass mask bit in CR0 is set, not when
		 * the emergency-signal bit checked below is.
		 */
		return 0;
	case KVM_S390_INT_EMERGENCY:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
			return 1;
		return 0;
	case KVM_S390_INT_SERVICE:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_SIGP_SET_PREFIX:
	case KVM_S390_RESTART:
		return 1;
	default:
		BUG();
	}
	return 0;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_ECALL_PEND |
		CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
		&vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR0;
		break;
	case KVM_S390_SIGP_STOP:
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
		break;
	default:
		BUG();
	}
}

static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
				   struct kvm_s390_interrupt_info *inti)
{
	const unsigned short table[] = { 2, 4, 4, 6 };
	int rc, exception = 0;
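
	/*
	 * Delivery follows the architected interruption scheme: the
	 * interruption code and parameters are stored into the guest
	 * lowcore, the current PSW is saved at the old-PSW location of
	 * the interruption class, and the new PSW of that class is
	 * loaded.  table[] maps the top two bits of the intercepted
	 * instruction (sie_block->ipa >> 14) to an instruction length
	 * in bytes, used for the program-interruption ILC field.
	 */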
	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
		vcpu->stat.deliver_emergency_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->emerg.code, 0);
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->emerg.code);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				     __LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_INT_EXTERNAL_CALL:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
		vcpu->stat.deliver_external_call++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->extcall.code, 0);
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1202);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->extcall.code);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				     __LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_INT_SERVICE:
		VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
			   inti->ext.ext_params);
		vcpu->stat.deliver_service_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->ext.ext_params, 0);
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				     __LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_INT_VIRTIO:
		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->ext.ext_params,
						 inti->ext.ext_params2);
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, 0x0d00);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				     __LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2,
				   inti->ext.ext_params2);
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_SIGP_STOP:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
		vcpu->stat.deliver_stop_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 0, 0);
		__set_intercept_indicator(vcpu, inti);
		break;
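
/*
 * Deliver a clock-comparator external interrupt (code 0x1004) if the
 * guest can currently take it: external interrupts enabled in the PSW
 * and the clock-comparator subclass mask (CR0 bit 0x800) set.  Returns
 * 1 if the interrupt was delivered, 0 otherwise.
 */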

	case KVM_S390_SIGP_SET_PREFIX:
		VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
			   inti->prefix.address);
		vcpu->stat.deliver_prefix_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->prefix.address, 0);
		kvm_s390_set_prefix(vcpu, inti->prefix.address);
		break;

	case KVM_S390_RESTART:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
		vcpu->stat.deliver_restart_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 0, 0);
		rc = copy_to_guest(vcpu,
				   offsetof(struct _lowcore, restart_old_psw),
				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				     offsetof(struct _lowcore, restart_psw),
				     sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		atomic_clear_mask(CPUSTAT_STOPPED,
				  &vcpu->arch.sie_block->cpuflags);
		break;

	case KVM_S390_PROGRAM_INT:
		VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
			   inti->pgm.code,
			   table[vcpu->arch.sie_block->ipa >> 14]);
		vcpu->stat.deliver_program_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->pgm.code, 0);
		rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_PGM_ILC,
				   table[vcpu->arch.sie_block->ipa >> 14]);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				     __LC_PGM_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	default:
		BUG();
	}
	if (exception) {
		printk("kvm: The guest lowcore is not mapped during interrupt "
		       "delivery, killing userspace\n");
		do_exit(SIGKILL);
	}
}

static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
	int rc, exception = 0;

	if (psw_extint_disabled(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
	if (rc == -EFAULT)
		exception = 1;
	rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	if (rc == -EFAULT)
		exception = 1;
	rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			     __LC_EXT_NEW_PSW, sizeof(psw_t));
	if (rc == -EFAULT)
		exception = 1;
	if (exception) {
		printk("kvm: The guest lowcore is not mapped during interrupt "
		       "delivery, killing userspace\n");
		do_exit(SIGKILL);
	}
	return 1;
}
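
/*
 * Check whether any local or floating interrupt is deliverable right
 * now, or whether the clock comparator has already passed while
 * clock-comparator interrupts are enabled.  kvm_s390_handle_wait()
 * uses this to decide whether a wait state can be left immediately.
 */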
static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	if (atomic_read(&li->active)) {
		spin_lock_bh(&li->lock);
		list_for_each_entry(inti, &li->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock_bh(&li->lock);
	}

	if ((!rc) && atomic_read(&fi->active)) {
		spin_lock(&fi->lock);
		list_for_each_entry(inti, &fi->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock(&fi->lock);
	}

	if ((!rc) && (vcpu->arch.sie_block->ckc <
		      get_clock() + vcpu->arch.sie_block->epoch)) {
		if ((!psw_extint_disabled(vcpu)) &&
		    (vcpu->arch.sie_block->gcr[0] & 0x800ul))
			rc = 1;
	}

	return rc;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;
	DECLARE_WAITQUEUE(wait, current);

	vcpu->stat.exit_wait_state++;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;

	__set_cpu_idle(vcpu);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 0;
	spin_unlock_bh(&vcpu->arch.local_int.lock);

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		__unset_cpu_idle(vcpu);
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (psw_extint_disabled(vcpu) ||
	    (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		goto no_timer;
	}

	now = get_clock() + vcpu->arch.sie_block->epoch;
	if (vcpu->arch.sie_block->ckc < now) {
		__unset_cpu_idle(vcpu);
		return 0;
	}

	/* TOD clock units to nanoseconds: ns = tod * 1000 / 4096 = tod * 125 / 512 */
	sltime = ((vcpu->arch.sie_block->ckc - now) * 125) >> 9;

	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime),
		      HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns",
		   sltime);
no_timer:
	spin_lock(&vcpu->arch.local_int.float_int->lock);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	add_wait_queue(&vcpu->arch.local_int.wq, &wait);
	while (list_empty(&vcpu->arch.local_int.list) &&
		list_empty(&vcpu->arch.local_int.float_int->list) &&
		(!vcpu->arch.local_int.timer_due) &&
		!signal_pending(current)) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&vcpu->arch.local_int.lock);
		spin_unlock(&vcpu->arch.local_int.float_int->lock);
		schedule();
		spin_lock(&vcpu->arch.local_int.float_int->lock);
		spin_lock_bh(&vcpu->arch.local_int.lock);
	}
	__unset_cpu_idle(vcpu);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
	spin_unlock_bh(&vcpu->arch.local_int.lock);
	spin_unlock(&vcpu->arch.local_int.float_int->lock);
	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
	return 0;
}
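
/*
 * The wait loop above sleeps until an interrupt is queued on either
 * list, a signal is pending, or timer_due is set.  The tasklet below
 * runs when the clock-comparator hrtimer fires: it sets timer_due
 * under the local interrupt lock and wakes up the vcpu's wait queue.
 */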
void kvm_s390_tasklet(unsigned long parm)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;

	spin_lock(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 1;
	if (waitqueue_active(&vcpu->arch.local_int.wq))
		wake_up_interruptible(&vcpu->arch.local_int.wq);
	spin_unlock(&vcpu->arch.local_int.lock);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *n, *inti = NULL;
	int deliver;

	__reset_intercept_indicators(vcpu);
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock_bh(&li->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}

	if (vcpu->arch.sie_block->ckc <
	    get_clock() + vcpu->arch.sie_block->epoch)
		__try_deliver_ckc_interrupt(vcpu);

	if (atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock(&fi->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}
}

int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_PROGRAM_INT;
	inti->pgm.code = code;

	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
	spin_lock_bh(&li->lock);
	list_add(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	BUG_ON(waitqueue_active(&li->wq));
	spin_unlock_bh(&li->lock);
	return 0;
}
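
/*
 * Inject a floating interrupt, i.e. one that belongs to the virtual
 * machine as a whole rather than to a single vcpu.  The interrupt is
 * queued on the floating list and one vcpu is kicked to deliver it:
 * preferably an idle one (found via idle_mask), otherwise the next
 * online vcpu in round-robin order via next_rr_cpu.
 */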
int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti;
	int sigcpu;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->type = s390int->type;
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
		inti->type = s390int->type;
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	list_add_tail(&inti->list, &fi->list);
	atomic_set(&fi->active, 1);
	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
	if (sigcpu == KVM_MAX_VCPUS) {
		do {
			sigcpu = fi->next_rr_cpu++;
			if (sigcpu == KVM_MAX_VCPUS)
				sigcpu = fi->next_rr_cpu = 0;
		} while (fi->local_int[sigcpu] == NULL);
	}
	li = fi->local_int[sigcpu];
	spin_lock_bh(&li->lock);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return 0;
}

int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
			 struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		inti->type = s390int->type;
		inti->pgm.code = s390int->parm;
		VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		inti->prefix.address = s390int->parm;
		inti->type = s390int->type;
		VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_STOP:
	case KVM_S390_RESTART:
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
		VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
		inti->type = s390int->type;
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type, s390int->parm,
				   s390int->parm64, 2);

	mutex_lock(&vcpu->kvm->lock);
	li = &vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	if (inti->type == KVM_S390_PROGRAM_INT)
		list_add(&inti->list, &li->list);
	else
		list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (inti->type == KVM_S390_SIGP_STOP)
		li->action_bits |= ACTION_STOP_ON_STOP;
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	mutex_unlock(&vcpu->kvm->lock);
	return 0;
}
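
/*
 * Illustrative userspace sketch (not part of this file; assumes the
 * standard KVM_S390_INTERRUPT ioctl, which dispatches floating
 * interrupts issued on the VM fd to kvm_s390_inject_vm() and per-cpu
 * interrupts issued on the vcpu fd to kvm_s390_inject_vcpu();
 * "vm_fd" and the parameter values are hypothetical):
 *
 *	struct kvm_s390_interrupt irq = {
 *		.type	= KVM_S390_INT_VIRTIO,
 *		.parm	= 0,
 *		.parm64	= token,	// hypothetical 64-bit parameter
 *	};
 *	ioctl(vm_fd, KVM_S390_INTERRUPT, &irq);
 */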