/*
 * interrupt.c - handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
#include "gaccess.h"

static int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
		return 0;
	return 1;
}

static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
			return 1;
		/* don't fall through to the emergency-signal check */
		return 0;
	case KVM_S390_INT_EMERGENCY:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
			return 1;
		return 0;
	case KVM_S390_INT_SERVICE:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_SIGP_SET_PREFIX:
	case KVM_S390_RESTART:
		return 1;
	default:
		BUG();
	}
	return 0;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_ECALL_PEND |
		CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
		&vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR0;
		break;
	case KVM_S390_SIGP_STOP:
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
		break;
	default:
		BUG();
	}
}
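
/*
 * Deliver one interrupt to the guest: store the interrupt code and
 * parameters into the guest lowcore, save the current PSW as the old PSW
 * and load the matching new PSW. table[] below maps the top two bits of
 * the intercepted instruction (sie_block->ipa >> 14) to the instruction
 * length in bytes, used for the program-interrupt ILC field.
 */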
static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
				   struct kvm_s390_interrupt_info *inti)
{
	const unsigned short table[] = { 2, 4, 4, 6 };
	int rc, exception = 0;

	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
		vcpu->stat.deliver_emergency_signal++;
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->emerg.code);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_INT_EXTERNAL_CALL:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
		vcpu->stat.deliver_external_call++;
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1202);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->extcall.code);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_INT_SERVICE:
		VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
			   inti->ext.ext_params);
		vcpu->stat.deliver_service_signal++;
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_INT_VIRTIO:
		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, 0x0d00);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2,
				   inti->ext.ext_params2);
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_SIGP_STOP:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
		vcpu->stat.deliver_stop_signal++;
		__set_intercept_indicator(vcpu, inti);
		break;

	case KVM_S390_SIGP_SET_PREFIX:
		VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
			   inti->prefix.address);
		vcpu->stat.deliver_prefix_signal++;
		kvm_s390_set_prefix(vcpu, inti->prefix.address);
		break;

	case KVM_S390_RESTART:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
		vcpu->stat.deliver_restart_signal++;
		rc = copy_to_guest(vcpu,
			offsetof(struct _lowcore, restart_old_psw),
			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		atomic_clear_mask(CPUSTAT_STOPPED,
				  &vcpu->arch.sie_block->cpuflags);
		break;

	case KVM_S390_PROGRAM_INT:
		VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
			   inti->pgm.code,
			   table[vcpu->arch.sie_block->ipa >> 14]);
		vcpu->stat.deliver_program_int++;
		rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_PGM_ILC,
				   table[vcpu->arch.sie_block->ipa >> 14]);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_PGM_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	default:
		BUG();
	}
	if (exception) {
		printk("kvm: The guest lowcore is not mapped during interrupt "
		       "delivery, killing userspace\n");
		do_exit(SIGKILL);
	}
}
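
/*
 * Deliver a clock comparator interrupt (external interrupt code 0x1004)
 * if the guest currently accepts it, i.e. external interrupts are enabled
 * in the PSW and the clock comparator subclass mask (CR0 bit 52, 0x800)
 * is set. Returns 1 if the interrupt was delivered, 0 otherwise.
 */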
static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
	int rc, exception = 0;

	if (psw_extint_disabled(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
	if (rc == -EFAULT)
		exception = 1;
	rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
		&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	if (rc == -EFAULT)
		exception = 1;
	rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
		__LC_EXT_NEW_PSW, sizeof(psw_t));
	if (rc == -EFAULT)
		exception = 1;
	if (exception) {
		printk("kvm: The guest lowcore is not mapped during interrupt "
		       "delivery, killing userspace\n");
		do_exit(SIGKILL);
	}
	return 1;
}

static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	if (atomic_read(&li->active)) {
		spin_lock_bh(&li->lock);
		list_for_each_entry(inti, &li->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock_bh(&li->lock);
	}

	if ((!rc) && atomic_read(&fi->active)) {
		spin_lock(&fi->lock);
		list_for_each_entry(inti, &fi->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock(&fi->lock);
	}

	if ((!rc) && (vcpu->arch.sie_block->ckc <
		get_clock() + vcpu->arch.sie_block->epoch)) {
		if ((!psw_extint_disabled(vcpu)) &&
		    (vcpu->arch.sie_block->gcr[0] & 0x800ul))
			rc = 1;
	}

	return rc;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;
	DECLARE_WAITQUEUE(wait, current);

	vcpu->stat.exit_wait_state++;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;

	__set_cpu_idle(vcpu);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 0;
	spin_unlock_bh(&vcpu->arch.local_int.lock);

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		__unset_cpu_idle(vcpu);
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (psw_extint_disabled(vcpu) ||
	    (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		goto no_timer;
	}

	now = get_clock() + vcpu->arch.sie_block->epoch;
	if (vcpu->arch.sie_block->ckc < now) {
		__unset_cpu_idle(vcpu);
		return 0;
	}

	/* one TOD clock unit is 1/4096 us, i.e. 125/512 ns */
	sltime = ((vcpu->arch.sie_block->ckc - now) * 125) >> 9;

	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime),
		      HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
	spin_lock(&vcpu->arch.local_int.float_int->lock);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	add_wait_queue(&vcpu->arch.local_int.wq, &wait);
	while (list_empty(&vcpu->arch.local_int.list) &&
		list_empty(&vcpu->arch.local_int.float_int->list) &&
		(!vcpu->arch.local_int.timer_due) &&
		!signal_pending(current)) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&vcpu->arch.local_int.lock);
		spin_unlock(&vcpu->arch.local_int.float_int->lock);
		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
		spin_lock(&vcpu->arch.local_int.float_int->lock);
		spin_lock_bh(&vcpu->arch.local_int.lock);
	}
	__unset_cpu_idle(vcpu);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
	spin_unlock_bh(&vcpu->arch.local_int.lock);
	spin_unlock(&vcpu->arch.local_int.float_int->lock);
	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
	return 0;
}
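
/*
 * Bottom half of the clock comparator timer wakeup: runs in softirq
 * context, marks the vcpu's timer as due and wakes the vcpu if it is
 * sleeping in kvm_s390_handle_wait() above.
 */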
void kvm_s390_tasklet(unsigned long parm)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;

	spin_lock(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 1;
	if (waitqueue_active(&vcpu->arch.local_int.wq))
		wake_up_interruptible(&vcpu->arch.local_int.wq);
	spin_unlock(&vcpu->arch.local_int.lock);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}
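
/*
 * Deliver everything that is pending and currently deliverable: first the
 * vcpu-local list, then the clock comparator, then the floating
 * (machine-wide) list. Interrupts that are pending but masked only set
 * the matching intercept indicators, so the vcpu exits SIE as soon as the
 * guest enables them.
 */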
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *n, *inti = NULL;
	int deliver;

	__reset_intercept_indicators(vcpu);
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock_bh(&li->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}

	if ((vcpu->arch.sie_block->ckc <
		get_clock() + vcpu->arch.sie_block->epoch))
		__try_deliver_ckc_interrupt(vcpu);

	if (atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock(&fi->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}
}

int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_PROGRAM_INT;
	inti->pgm.code = code;

	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
	spin_lock_bh(&li->lock);
	list_add(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	BUG_ON(waitqueue_active(&li->wq));
	spin_unlock_bh(&li->lock);
	return 0;
}
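
/*
 * Inject a floating interrupt, i.e. one that is pending for the machine
 * rather than for a specific vcpu. Only service signals and virtio
 * notifications may be floating. The interrupt is queued on the global
 * list and an idle vcpu is woken to deliver it; if none is idle, vcpus
 * are picked round-robin. Backs the KVM_S390_INTERRUPT ioctl on the VM
 * file descriptor, e.g. from userspace (illustrative values):
 *
 *	struct kvm_s390_interrupt irq = {
 *		.type	= KVM_S390_INT_VIRTIO,
 *		.parm	= param,
 *		.parm64	= param64,
 *	};
 *	ioctl(vm_fd, KVM_S390_INTERRUPT, &irq);
 */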
int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti;
	int sigcpu;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->type = s390int->type;
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
		inti->type = s390int->type;
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
	default:
		kfree(inti);
		return -EINVAL;
	}

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	list_add_tail(&inti->list, &fi->list);
	atomic_set(&fi->active, 1);
	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
	if (sigcpu == KVM_MAX_VCPUS) {
		/* no idle cpu found, pick the next one round-robin */
		do {
			sigcpu = fi->next_rr_cpu++;
			if (sigcpu == KVM_MAX_VCPUS)
				sigcpu = fi->next_rr_cpu = 0;
		} while (fi->local_int[sigcpu] == NULL);
	}
	li = fi->local_int[sigcpu];
	spin_lock_bh(&li->lock);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return 0;
}

/*
 * Inject an interrupt targeted at a single vcpu: program checks are
 * queued at the head of the local list so they are delivered before
 * anything else; all other types are appended. Backs the
 * KVM_S390_INTERRUPT ioctl on a vcpu file descriptor.
 */
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
			 struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		inti->type = s390int->type;
		inti->pgm.code = s390int->parm;
		VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		inti->prefix.address = s390int->parm;
		inti->type = s390int->type;
		VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_STOP:
	case KVM_S390_RESTART:
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
		VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
		inti->type = s390int->type;
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	default:
		kfree(inti);
		return -EINVAL;
	}

	mutex_lock(&vcpu->kvm->lock);
	li = &vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	if (inti->type == KVM_S390_PROGRAM_INT)
		list_add(&inti->list, &li->list);
	else
		list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (inti->type == KVM_S390_SIGP_STOP)
		li->action_bits |= ACTION_STOP_ON_STOP;
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	mutex_unlock(&vcpu->kvm->lock);
	return 0;
}