/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define IOINT_SCHID_MASK 0x0000ffff
#define IOINT_SSID_MASK 0x00030000
#define IOINT_CSSID_MASK 0x03fc0000
#define IOINT_AI_MASK 0x04000000

/* All non-I/O interrupt types are encoded as 0xfffe....u constants. */
static int is_ioint(u64 type)
{
        return ((type & 0xfffe0000u) != 0xfffe0000u);
}

static int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
        if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
            (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
            (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
                return 0;
        return 1;
}

/*
 * The ISC sits in bits 2-4 (IBM bit order) of the I/O interruption
 * word; map it to the corresponding subclass-mask bit position as
 * used in CR6, e.g. isc 4 yields 0x08000000.
 */
static u64 int_word_to_isc_bits(u32 int_word)
{
        u8 isc = (int_word & 0x38000000) >> 27;

        return (0x80 >> isc) << 24;
}

static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
                                      struct kvm_s390_interrupt_info *inti)
{
        switch (inti->type) {
        case KVM_S390_INT_EXTERNAL_CALL:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
                        return 1;
                return 0;
        case KVM_S390_INT_EMERGENCY:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
                        return 1;
                return 0;
        case KVM_S390_INT_SERVICE:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
                        return 1;
                return 0;
        case KVM_S390_INT_VIRTIO:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
                        return 1;
                return 0;
        case KVM_S390_PROGRAM_INT:
        case KVM_S390_SIGP_STOP:
        case KVM_S390_SIGP_SET_PREFIX:
        case KVM_S390_RESTART:
                return 1;
        case KVM_S390_MCHK:
                if (psw_mchk_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
                        return 1;
                return 0;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                if (psw_ioint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[6] &
                    int_word_to_isc_bits(inti->io.io_int_word))
                        return 1;
                return 0;
        default:
                printk(KERN_WARNING "illegal interrupt type %llx\n",
                       inti->type);
                BUG();
        }
        return 0;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
        BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
        atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
        BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
        atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}
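
/*
 * Drop any interception requests a previous delivery pass may have
 * armed; kvm_s390_deliver_pending_interrupts() re-arms them below for
 * everything that remains pending but not yet deliverable.
 */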
static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_ECALL_PEND |
                          CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
                          &vcpu->arch.sie_block->cpuflags);
        vcpu->arch.sie_block->lctl = 0x0000;
        vcpu->arch.sie_block->ictl &= ~ICTL_LPSW;
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
        atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
                                      struct kvm_s390_interrupt_info *inti)
{
        switch (inti->type) {
        case KVM_S390_INT_EXTERNAL_CALL:
        case KVM_S390_INT_EMERGENCY:
        case KVM_S390_INT_SERVICE:
        case KVM_S390_INT_VIRTIO:
                if (psw_extint_disabled(vcpu))
                        __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
                else
                        vcpu->arch.sie_block->lctl |= LCTL_CR0;
                break;
        case KVM_S390_SIGP_STOP:
                __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
                break;
        case KVM_S390_MCHK:
                if (psw_mchk_disabled(vcpu))
                        vcpu->arch.sie_block->ictl |= ICTL_LPSW;
                else
                        vcpu->arch.sie_block->lctl |= LCTL_CR14;
                break;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                if (psw_ioint_disabled(vcpu))
                        __set_cpuflag(vcpu, CPUSTAT_IO_INT);
                else
                        vcpu->arch.sie_block->lctl |= LCTL_CR6;
                break;
        default:
                BUG();
        }
}
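
/*
 * Deliver one interrupt: store the interruption code and parameters in
 * the guest lowcore, save the current PSW as the old PSW and load the
 * matching new PSW.  A failing lowcore access is fatal for userspace,
 * since the guest prefix page must always be mapped.
 */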
static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
                                   struct kvm_s390_interrupt_info *inti)
{
        /* instruction lengths, indexed by the two opcode bits in the ipa */
        const unsigned short table[] = { 2, 4, 4, 6 };
        int rc = 0;

        switch (inti->type) {
        case KVM_S390_INT_EMERGENCY:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
                vcpu->stat.deliver_emergency_signal++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 inti->emerg.code, 0);
                rc = put_guest(vcpu, 0x1201, (u16 __user *)__LC_EXT_INT_CODE);
                rc |= put_guest(vcpu, inti->emerg.code,
                                (u16 __user *)__LC_EXT_CPU_ADDR);
                rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                                    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                                      __LC_EXT_NEW_PSW, sizeof(psw_t));
                break;
        case KVM_S390_INT_EXTERNAL_CALL:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
                vcpu->stat.deliver_external_call++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 inti->extcall.code, 0);
                rc = put_guest(vcpu, 0x1202, (u16 __user *)__LC_EXT_INT_CODE);
                rc |= put_guest(vcpu, inti->extcall.code,
                                (u16 __user *)__LC_EXT_CPU_ADDR);
                rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                                    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                                      __LC_EXT_NEW_PSW, sizeof(psw_t));
                break;
        case KVM_S390_INT_SERVICE:
                VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
                           inti->ext.ext_params);
                vcpu->stat.deliver_service_signal++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 inti->ext.ext_params, 0);
                rc = put_guest(vcpu, 0x2401, (u16 __user *)__LC_EXT_INT_CODE);
                rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                                    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                                      __LC_EXT_NEW_PSW, sizeof(psw_t));
                rc |= put_guest(vcpu, inti->ext.ext_params,
                                (u32 __user *)__LC_EXT_PARAMS);
                break;
        case KVM_S390_INT_VIRTIO:
                VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
                           inti->ext.ext_params, inti->ext.ext_params2);
                vcpu->stat.deliver_virtio_interrupt++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 inti->ext.ext_params,
                                                 inti->ext.ext_params2);
                rc = put_guest(vcpu, 0x2603, (u16 __user *)__LC_EXT_INT_CODE);
                rc |= put_guest(vcpu, 0x0d00, (u16 __user *)__LC_EXT_CPU_ADDR);
                rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                                    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                                      __LC_EXT_NEW_PSW, sizeof(psw_t));
                rc |= put_guest(vcpu, inti->ext.ext_params,
                                (u32 __user *)__LC_EXT_PARAMS);
                rc |= put_guest(vcpu, inti->ext.ext_params2,
                                (u64 __user *)__LC_EXT_PARAMS2);
                break;
        case KVM_S390_SIGP_STOP:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
                vcpu->stat.deliver_stop_signal++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 0, 0);
                __set_intercept_indicator(vcpu, inti);
                break;

        case KVM_S390_SIGP_SET_PREFIX:
                VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
                           inti->prefix.address);
                vcpu->stat.deliver_prefix_signal++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 inti->prefix.address, 0);
                kvm_s390_set_prefix(vcpu, inti->prefix.address);
                break;

        case KVM_S390_RESTART:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
                vcpu->stat.deliver_restart_signal++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 0, 0);
                rc = copy_to_guest(vcpu,
                                   offsetof(struct _lowcore, restart_old_psw),
                                   &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                                      offsetof(struct _lowcore, restart_psw),
                                      sizeof(psw_t));
                atomic_clear_mask(CPUSTAT_STOPPED,
                                  &vcpu->arch.sie_block->cpuflags);
                break;
        case KVM_S390_PROGRAM_INT:
                VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
                           inti->pgm.code,
                           table[vcpu->arch.sie_block->ipa >> 14]);
                vcpu->stat.deliver_program_int++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 inti->pgm.code, 0);
                rc = put_guest(vcpu, inti->pgm.code,
                               (u16 __user *)__LC_PGM_INT_CODE);
                rc |= put_guest(vcpu, table[vcpu->arch.sie_block->ipa >> 14],
                                (u16 __user *)__LC_PGM_ILC);
                rc |= copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
                                    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                                      __LC_PGM_NEW_PSW, sizeof(psw_t));
                break;

        case KVM_S390_MCHK:
                VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
                           inti->mchk.mcic);
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 inti->mchk.cr14,
                                                 inti->mchk.mcic);
                rc = kvm_s390_vcpu_store_status(vcpu,
                                                KVM_S390_STORE_STATUS_PREFIXED);
                rc |= put_guest(vcpu, inti->mchk.mcic,
                                (u64 __user *)__LC_MCCK_CODE);
                rc |= copy_to_guest(vcpu, __LC_MCK_OLD_PSW,
                                    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                                      __LC_MCK_NEW_PSW, sizeof(psw_t));
                break;

        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
        {
                __u32 param0 = ((__u32)inti->io.subchannel_id << 16) |
                                inti->io.subchannel_nr;
                __u64 param1 = ((__u64)inti->io.io_int_parm << 32) |
                                inti->io.io_int_word;
                VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
                vcpu->stat.deliver_io_int++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 param0, param1);
                rc = put_guest(vcpu, inti->io.subchannel_id,
                               (u16 __user *) __LC_SUBCHANNEL_ID);
                rc |= put_guest(vcpu, inti->io.subchannel_nr,
                                (u16 __user *) __LC_SUBCHANNEL_NR);
                rc |= put_guest(vcpu, inti->io.io_int_parm,
                                (u32 __user *) __LC_IO_INT_PARM);
                rc |= put_guest(vcpu, inti->io.io_int_word,
                                (u32 __user *) __LC_IO_INT_WORD);
                rc |= copy_to_guest(vcpu, __LC_IO_OLD_PSW,
                                    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                                      __LC_IO_NEW_PSW, sizeof(psw_t));
                break;
        }
        default:
                BUG();
        }
        if (rc) {
                printk("kvm: The guest lowcore is not mapped during interrupt "
                       "delivery, killing userspace\n");
                do_exit(SIGKILL);
        }
}
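
/*
 * Try to deliver a clock comparator external interrupt (code 0x1004).
 * Returns 1 if the guest PSW and the CR0 subclass mask allowed the
 * interrupt to be delivered, 0 otherwise.
 */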
static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
        int rc;

        if (psw_extint_disabled(vcpu))
                return 0;
        if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
                return 0;
        rc = put_guest(vcpu, 0x1004, (u16 __user *)__LC_EXT_INT_CODE);
        rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                              __LC_EXT_NEW_PSW, sizeof(psw_t));
        if (rc) {
                printk("kvm: The guest lowcore is not mapped during interrupt "
                       "delivery, killing userspace\n");
                do_exit(SIGKILL);
        }
        return 1;
}

static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct kvm_s390_interrupt_info *inti;
        int rc = 0;

        if (atomic_read(&li->active)) {
                spin_lock_bh(&li->lock);
                list_for_each_entry(inti, &li->list, list)
                        if (__interrupt_is_deliverable(vcpu, inti)) {
                                rc = 1;
                                break;
                        }
                spin_unlock_bh(&li->lock);
        }

        if ((!rc) && atomic_read(&fi->active)) {
                spin_lock(&fi->lock);
                list_for_each_entry(inti, &fi->list, list)
                        if (__interrupt_is_deliverable(vcpu, inti)) {
                                rc = 1;
                                break;
                        }
                spin_unlock(&fi->lock);
        }

        if ((!rc) && (vcpu->arch.sie_block->ckc <
                      get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) {
                if ((!psw_extint_disabled(vcpu)) &&
                    (vcpu->arch.sie_block->gcr[0] & 0x800ul))
                        rc = 1;
        }

        return rc;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return 0;
}
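
/*
 * Handle an enabled wait of the guest: sleep until an interrupt is
 * pending or a signal arrives.  If the clock comparator is enabled in
 * CR0, an hrtimer ends the wait when the comparator is due.  A wait
 * with all interrupt classes disabled could never be ended by an
 * interrupt, so it is rejected with -EOPNOTSUPP.
 */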
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
        u64 now, sltime;
        DECLARE_WAITQUEUE(wait, current);

        vcpu->stat.exit_wait_state++;
        if (kvm_cpu_has_interrupt(vcpu))
                return 0;

        __set_cpu_idle(vcpu);
        spin_lock_bh(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.timer_due = 0;
        spin_unlock_bh(&vcpu->arch.local_int.lock);

        if (psw_interrupts_disabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
                __unset_cpu_idle(vcpu);
                return -EOPNOTSUPP; /* disabled wait */
        }

        if (psw_extint_disabled(vcpu) ||
            (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
                VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
                goto no_timer;
        }

        now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
        if (vcpu->arch.sie_block->ckc < now) {
                __unset_cpu_idle(vcpu);
                return 0;
        }

        sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

        hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime),
                      HRTIMER_MODE_REL);
        VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns",
                   sltime);
no_timer:
        spin_lock(&vcpu->arch.local_int.float_int->lock);
        spin_lock_bh(&vcpu->arch.local_int.lock);
        add_wait_queue(&vcpu->wq, &wait);
        while (list_empty(&vcpu->arch.local_int.list) &&
               list_empty(&vcpu->arch.local_int.float_int->list) &&
               (!vcpu->arch.local_int.timer_due) &&
               !signal_pending(current)) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock_bh(&vcpu->arch.local_int.lock);
                spin_unlock(&vcpu->arch.local_int.float_int->lock);
                schedule();
                spin_lock(&vcpu->arch.local_int.float_int->lock);
                spin_lock_bh(&vcpu->arch.local_int.lock);
        }
        __unset_cpu_idle(vcpu);
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&vcpu->wq, &wait);
        spin_unlock_bh(&vcpu->arch.local_int.lock);
        spin_unlock(&vcpu->arch.local_int.float_int->lock);
        hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
        return 0;
}

void kvm_s390_tasklet(unsigned long parm)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;

        spin_lock(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.timer_due = 1;
        if (waitqueue_active(&vcpu->wq))
                wake_up_interruptible(&vcpu->wq);
        spin_unlock(&vcpu->arch.local_int.lock);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
        tasklet_schedule(&vcpu->arch.tasklet);

        return HRTIMER_NORESTART;
}

void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct kvm_s390_interrupt_info *n, *inti = NULL;
        int deliver;

        __reset_intercept_indicators(vcpu);
        if (atomic_read(&li->active)) {
                do {
                        deliver = 0;
                        spin_lock_bh(&li->lock);
                        list_for_each_entry_safe(inti, n, &li->list, list) {
                                if (__interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
                                        deliver = 1;
                                        break;
                                }
                                __set_intercept_indicator(vcpu, inti);
                        }
                        if (list_empty(&li->list))
                                atomic_set(&li->active, 0);
                        spin_unlock_bh(&li->lock);
                        if (deliver) {
                                __do_deliver_interrupt(vcpu, inti);
                                kfree(inti);
                        }
                } while (deliver);
        }

        if ((vcpu->arch.sie_block->ckc <
             get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
                __try_deliver_ckc_interrupt(vcpu);

        if (atomic_read(&fi->active)) {
                do {
                        deliver = 0;
                        spin_lock(&fi->lock);
                        list_for_each_entry_safe(inti, n, &fi->list, list) {
                                if (__interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
                                        deliver = 1;
                                        break;
                                }
                                __set_intercept_indicator(vcpu, inti);
                        }
                        if (list_empty(&fi->list))
                                atomic_set(&fi->active, 0);
                        spin_unlock(&fi->lock);
                        if (deliver) {
                                __do_deliver_interrupt(vcpu, inti);
                                kfree(inti);
                        }
                } while (deliver);
        }
}
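
/*
 * Same delivery loops as kvm_s390_deliver_pending_interrupts(), except
 * that only machine checks (KVM_S390_MCHK) are taken off the local and
 * floating lists and delivered.
 */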
void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct kvm_s390_interrupt_info *n, *inti = NULL;
        int deliver;

        __reset_intercept_indicators(vcpu);
        if (atomic_read(&li->active)) {
                do {
                        deliver = 0;
                        spin_lock_bh(&li->lock);
                        list_for_each_entry_safe(inti, n, &li->list, list) {
                                if ((inti->type == KVM_S390_MCHK) &&
                                    __interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
                                        deliver = 1;
                                        break;
                                }
                                __set_intercept_indicator(vcpu, inti);
                        }
                        if (list_empty(&li->list))
                                atomic_set(&li->active, 0);
                        spin_unlock_bh(&li->lock);
                        if (deliver) {
                                __do_deliver_interrupt(vcpu, inti);
                                kfree(inti);
                        }
                } while (deliver);
        }

        if (atomic_read(&fi->active)) {
                do {
                        deliver = 0;
                        spin_lock(&fi->lock);
                        list_for_each_entry_safe(inti, n, &fi->list, list) {
                                if ((inti->type == KVM_S390_MCHK) &&
                                    __interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
                                        deliver = 1;
                                        break;
                                }
                                __set_intercept_indicator(vcpu, inti);
                        }
                        if (list_empty(&fi->list))
                                atomic_set(&fi->active, 0);
                        spin_unlock(&fi->lock);
                        if (deliver) {
                                __do_deliver_interrupt(vcpu, inti);
                                kfree(inti);
                        }
                } while (deliver);
        }
}

int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_interrupt_info *inti;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        inti->type = KVM_S390_PROGRAM_INT;
        inti->pgm.code = code;

        VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
        spin_lock_bh(&li->lock);
        list_add(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        BUG_ON(waitqueue_active(li->wq));
        spin_unlock_bh(&li->lock);
        return 0;
}

/*
 * Dequeue and return the first pending I/O interrupt that matches
 * either the given subchannel id/nr (schid) or one of the ISCs enabled
 * in cr6; exactly one of the two selectors must be non-zero.
 */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
                                                    u64 cr6, u64 schid)
{
        struct kvm_s390_float_interrupt *fi;
        struct kvm_s390_interrupt_info *inti, *iter;

        if ((!schid && !cr6) || (schid && cr6))
                return NULL;
        mutex_lock(&kvm->lock);
        fi = &kvm->arch.float_int;
        spin_lock(&fi->lock);
        inti = NULL;
        list_for_each_entry(iter, &fi->list, list) {
                if (!is_ioint(iter->type))
                        continue;
                if (cr6 &&
                    ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
                        continue;
                if (schid) {
                        if (((schid & 0x00000000ffff0000) >> 16) !=
                            iter->io.subchannel_id)
                                continue;
                        if ((schid & 0x000000000000ffff) !=
                            iter->io.subchannel_nr)
                                continue;
                }
                inti = iter;
                break;
        }
        if (inti)
                list_del_init(&inti->list);
        if (list_empty(&fi->list))
                atomic_set(&fi->active, 0);
        spin_unlock(&fi->lock);
        mutex_unlock(&kvm->lock);
        return inti;
}
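
/*
 * Inject a floating interrupt: queue it on the per-vm list, keeping
 * I/O interrupts sorted by ISC, and then wake up an idle vcpu, or the
 * next one in round-robin order if none is idle, to deliver it.
 */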
int kvm_s390_inject_vm(struct kvm *kvm,
                       struct kvm_s390_interrupt *s390int)
{
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_float_interrupt *fi;
        struct kvm_s390_interrupt_info *inti, *iter;
        int sigcpu;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        switch (s390int->type) {
        case KVM_S390_INT_VIRTIO:
                VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
                         s390int->parm, s390int->parm64);
                inti->type = s390int->type;
                inti->ext.ext_params = s390int->parm;
                inti->ext.ext_params2 = s390int->parm64;
                break;
        case KVM_S390_INT_SERVICE:
                VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
                inti->type = s390int->type;
                inti->ext.ext_params = s390int->parm;
                break;
        case KVM_S390_PROGRAM_INT:
        case KVM_S390_SIGP_STOP:
        case KVM_S390_INT_EXTERNAL_CALL:
        case KVM_S390_INT_EMERGENCY:
                /* these are per-vcpu interrupts, not floating ones */
                kfree(inti);
                return -EINVAL;
        case KVM_S390_MCHK:
                VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
                         s390int->parm64);
                inti->type = s390int->type;
                inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
                inti->mchk.mcic = s390int->parm64;
                break;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                if (s390int->type & IOINT_AI_MASK)
                        VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
                else
                        VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
                                 s390int->type & IOINT_CSSID_MASK,
                                 s390int->type & IOINT_SSID_MASK,
                                 s390int->type & IOINT_SCHID_MASK);
                inti->type = s390int->type;
                inti->io.subchannel_id = s390int->parm >> 16;
                inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
                inti->io.io_int_parm = s390int->parm64 >> 32;
                inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
                break;
        default:
                kfree(inti);
                return -EINVAL;
        }
        trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
                                 2);

        mutex_lock(&kvm->lock);
        fi = &kvm->arch.float_int;
        spin_lock(&fi->lock);
        if (!is_ioint(inti->type))
                list_add_tail(&inti->list, &fi->list);
        else {
                u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);

                /* Keep I/O interrupts sorted in isc order. */
                list_for_each_entry(iter, &fi->list, list) {
                        if (!is_ioint(iter->type))
                                continue;
                        if (int_word_to_isc_bits(iter->io.io_int_word)
                            <= isc_bits)
                                continue;
                        break;
                }
                list_add_tail(&inti->list, &iter->list);
        }
        atomic_set(&fi->active, 1);
        sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
        if (sigcpu == KVM_MAX_VCPUS) {
                do {
                        sigcpu = fi->next_rr_cpu++;
                        if (sigcpu == KVM_MAX_VCPUS)
                                sigcpu = fi->next_rr_cpu = 0;
                } while (fi->local_int[sigcpu] == NULL);
        }
        li = fi->local_int[sigcpu];
        spin_lock_bh(&li->lock);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        if (waitqueue_active(li->wq))
                wake_up_interruptible(li->wq);
        spin_unlock_bh(&li->lock);
        spin_unlock(&fi->lock);
        mutex_unlock(&kvm->lock);
        return 0;
}
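
/*
 * Inject an interrupt into a single vcpu.  Program interrupts are put
 * at the head of the local list so they are delivered ahead of
 * anything already queued; all other types are appended.
 */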
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
                         struct kvm_s390_interrupt *s390int)
{
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_interrupt_info *inti;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        switch (s390int->type) {
        case KVM_S390_PROGRAM_INT:
                if (s390int->parm & 0xffff0000) {
                        kfree(inti);
                        return -EINVAL;
                }
                inti->type = s390int->type;
                inti->pgm.code = s390int->parm;
                VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
                           s390int->parm);
                break;
        case KVM_S390_SIGP_SET_PREFIX:
                inti->prefix.address = s390int->parm;
                inti->type = s390int->type;
                VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
                           s390int->parm);
                break;
        case KVM_S390_SIGP_STOP:
        case KVM_S390_RESTART:
                VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
                inti->type = s390int->type;
                break;
        case KVM_S390_INT_EXTERNAL_CALL:
                if (s390int->parm & 0xffff0000) {
                        kfree(inti);
                        return -EINVAL;
                }
                VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
                           s390int->parm);
                inti->type = s390int->type;
                inti->extcall.code = s390int->parm;
                break;
        case KVM_S390_INT_EMERGENCY:
                if (s390int->parm & 0xffff0000) {
                        kfree(inti);
                        return -EINVAL;
                }
                VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", s390int->parm);
                inti->type = s390int->type;
                inti->emerg.code = s390int->parm;
                break;
        case KVM_S390_MCHK:
                VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
                           s390int->parm64);
                inti->type = s390int->type;
                inti->mchk.mcic = s390int->parm64;
                break;
        case KVM_S390_INT_VIRTIO:
        case KVM_S390_INT_SERVICE:
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
        default:
                kfree(inti);
                return -EINVAL;
        }
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type, s390int->parm,
                                   s390int->parm64, 2);

        mutex_lock(&vcpu->kvm->lock);
        li = &vcpu->arch.local_int;
        spin_lock_bh(&li->lock);
        if (inti->type == KVM_S390_PROGRAM_INT)
                list_add(&inti->list, &li->list);
        else
                list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        if (inti->type == KVM_S390_SIGP_STOP)
                li->action_bits |= ACTION_STOP_ON_STOP;
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        if (waitqueue_active(&vcpu->wq))
                wake_up_interruptible(&vcpu->wq);
        spin_unlock_bh(&li->lock);
        mutex_unlock(&vcpu->kvm->lock);
        return 0;
}