/*
 * interrupt.c - handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <asm/lowcore.h>
#include <asm/uaccess.h>
#include <linux/kvm_host.h>
#include <linux/signal.h>
#include "kvm-s390.h"
#include "gaccess.h"

static int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
		return 0;
	return 1;
}

static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
			return 1;
		return 0;
	case KVM_S390_INT_SERVICE:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_SIGP_SET_PREFIX:
	case KVM_S390_RESTART:
		return 1;
	default:
		BUG();
	}
	return 0;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_ECALL_PEND |
			  CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
			  &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR0;
		break;
	case KVM_S390_SIGP_STOP:
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
		break;
	default:
		BUG();
	}
}

static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
				   struct kvm_s390_interrupt_info *inti)
{
	const unsigned short table[] = { 2, 4, 4, 6 };
	int rc, exception = 0;

	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
		vcpu->stat.deliver_emergency_signal++;
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
		if (rc == -EFAULT)
			exception = 1;

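		/*
		 * Standard s390 external interrupt PSW swap: save the
		 * current guest PSW as the external old PSW in the lowcore,
		 * then load the external new PSW from the lowcore.
		 */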
		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				     __LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_INT_SERVICE:
		VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
			   inti->ext.ext_params);
		vcpu->stat.deliver_service_signal++;
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				     __LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_INT_VIRTIO:
		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, 0x0d00);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				     __LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u64(vcpu, __LC_PFAULT_INTPARM,
				   inti->ext.ext_params2);
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_SIGP_STOP:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
		vcpu->stat.deliver_stop_signal++;
		__set_intercept_indicator(vcpu, inti);
		break;

	case KVM_S390_SIGP_SET_PREFIX:
		VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
			   inti->prefix.address);
		vcpu->stat.deliver_prefix_signal++;
		vcpu->arch.sie_block->prefix = inti->prefix.address;
		vcpu->arch.sie_block->ihcpu = 0xffff;
		break;

	case KVM_S390_RESTART:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
		vcpu->stat.deliver_restart_signal++;
		rc = copy_to_guest(vcpu,
				   offsetof(struct _lowcore, restart_old_psw),
				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				     offsetof(struct _lowcore, restart_psw),
				     sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_PROGRAM_INT:
		VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
			   inti->pgm.code,
			   table[vcpu->arch.sie_block->ipa >> 14]);
		vcpu->stat.deliver_program_int++;
		rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_PGM_ILC,
				   table[vcpu->arch.sie_block->ipa >> 14]);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
				   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
				     __LC_PGM_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	default:
		BUG();
	}
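	/*
	 * Any fault above means the guest lowcore (prefix page) was not
	 * accessible; the interrupt can neither be delivered nor backed
	 * out at this point.
	 */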
	if (exception) {
		printk(KERN_WARNING "kvm: The guest lowcore is not mapped "
		       "during interrupt delivery, killing userspace\n");
		do_exit(SIGKILL);
	}
}

static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
	int rc, exception = 0;

	if (psw_extint_disabled(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
	if (rc == -EFAULT)
		exception = 1;
	rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	if (rc == -EFAULT)
		exception = 1;
	rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			     __LC_EXT_NEW_PSW, sizeof(psw_t));
	if (rc == -EFAULT)
		exception = 1;
	if (exception) {
		printk(KERN_WARNING "kvm: The guest lowcore is not mapped "
		       "during interrupt delivery, killing userspace\n");
		do_exit(SIGKILL);
	}
	return 1;
}

int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	if (atomic_read(&li->active)) {
		spin_lock_bh(&li->lock);
		list_for_each_entry(inti, &li->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock_bh(&li->lock);
	}

	if ((!rc) && atomic_read(&fi->active)) {
		spin_lock_bh(&fi->lock);
		list_for_each_entry(inti, &fi->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock_bh(&fi->lock);
	}

	if ((!rc) && (vcpu->arch.sie_block->ckc <
		      get_clock() + vcpu->arch.sie_block->epoch)) {
		if ((!psw_extint_disabled(vcpu)) &&
		    (vcpu->arch.sie_block->gcr[0] & 0x800ul))
			rc = 1;
	}

	return rc;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;
	DECLARE_WAITQUEUE(wait, current);

	vcpu->stat.exit_wait_state++;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;

	__set_cpu_idle(vcpu);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 0;
	spin_unlock_bh(&vcpu->arch.local_int.lock);

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		__unset_cpu_idle(vcpu);
		return -ENOTSUPP; /* disabled wait */
	}

	if (psw_extint_disabled(vcpu) ||
	    (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		goto no_timer;
	}

	now = get_clock() + vcpu->arch.sie_block->epoch;
	if (vcpu->arch.sie_block->ckc < now) {
		__unset_cpu_idle(vcpu);
		return 0;
	}

	/* one second of guest TOD clock is 0xf4240000 clock units;
	 * convert the remaining time to jiffies, rounding up */
	sltime = (vcpu->arch.sie_block->ckc - now) / (0xf4240000ul / HZ) + 1;

	vcpu->arch.ckc_timer.expires = jiffies + sltime;

	add_timer(&vcpu->arch.ckc_timer);
	VCPU_EVENT(vcpu, 5, "enabled wait timer:%llx jiffies", sltime);
no_timer:
	spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	add_wait_queue(&vcpu->arch.local_int.wq, &wait);
	while (list_empty(&vcpu->arch.local_int.list) &&
	       list_empty(&vcpu->arch.local_int.float_int->list) &&
	       (!vcpu->arch.local_int.timer_due) &&
	       !signal_pending(current)) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&vcpu->arch.local_int.lock);
		spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
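		/*
		 * Both locks are dropped, in reverse order of acquisition,
		 * before giving up the cpu; kvm_s390_idle_wakeup() or an
		 * interrupt injection will wake us from schedule().
		 */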
		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
		spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
		spin_lock_bh(&vcpu->arch.local_int.lock);
	}
	__unset_cpu_idle(vcpu);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
	spin_unlock_bh(&vcpu->arch.local_int.lock);
	spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
	del_timer(&vcpu->arch.ckc_timer);
	return 0;
}

void kvm_s390_idle_wakeup(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	spin_lock_bh(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 1;
	if (waitqueue_active(&vcpu->arch.local_int.wq))
		wake_up_interruptible(&vcpu->arch.local_int.wq);
	spin_unlock_bh(&vcpu->arch.local_int.lock);
}

void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *n, *inti = NULL;
	int deliver;

	__reset_intercept_indicators(vcpu);
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock_bh(&li->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}

	if ((vcpu->arch.sie_block->ckc <
	     get_clock() + vcpu->arch.sie_block->epoch))
		__try_deliver_ckc_interrupt(vcpu);

	if (atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock_bh(&fi->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}
}

int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_PROGRAM_INT;
	inti->pgm.code = code;

	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
	spin_lock_bh(&li->lock);
	list_add(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	BUG_ON(waitqueue_active(&li->wq));
	spin_unlock_bh(&li->lock);
	return 0;
}

int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti;
	int sigcpu;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->type = s390int->type;
		inti->ext.ext_params = s390int->parm;
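		/* ext_params2 is delivered to the guest at
		 * __LC_PFAULT_INTPARM by __do_deliver_interrupt() */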
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
		inti->type = s390int->type;
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_INT_EMERGENCY:
	default:
		kfree(inti);
		return -EINVAL;
	}

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock_bh(&fi->lock);
	list_add_tail(&inti->list, &fi->list);
	atomic_set(&fi->active, 1);
	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
	if (sigcpu == KVM_MAX_VCPUS) {
		do {
			sigcpu = fi->next_rr_cpu++;
			if (sigcpu == KVM_MAX_VCPUS)
				sigcpu = fi->next_rr_cpu = 0;
		} while (fi->local_int[sigcpu] == NULL);
	}
	li = fi->local_int[sigcpu];
	spin_lock_bh(&li->lock);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	spin_unlock_bh(&fi->lock);
	mutex_unlock(&kvm->lock);
	return 0;
}

int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
			 struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		inti->type = s390int->type;
		inti->pgm.code = s390int->parm;
		VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		inti->prefix.address = s390int->parm;
		inti->type = s390int->type;
		VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_STOP:
	case KVM_S390_RESTART:
	case KVM_S390_INT_EMERGENCY:
		VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
		inti->type = s390int->type;
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	default:
		kfree(inti);
		return -EINVAL;
	}

	mutex_lock(&vcpu->kvm->lock);
	li = &vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	if (inti->type == KVM_S390_PROGRAM_INT)
		list_add(&inti->list, &li->list);
	else
		list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (inti->type == KVM_S390_SIGP_STOP)
		li->action_bits |= ACTION_STOP_ON_STOP;
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	mutex_unlock(&vcpu->kvm->lock);
	return 0;
}