/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008,2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define IOINT_SCHID_MASK 0x0000ffff
#define IOINT_SSID_MASK 0x00030000
#define IOINT_CSSID_MASK 0x03fc0000
#define IOINT_AI_MASK 0x04000000

static void deliver_ckc_interrupt(struct kvm_vcpu *vcpu);

static int is_ioint(u64 type)
{
	return ((type & 0xfffe0000u) != 0xfffe0000u);
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
		return 0;
	return 1;
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	if (psw_extint_disabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
		/* No timer interrupts when single stepping */
		return 0;
	return 1;
}

static u64 int_word_to_isc_bits(u32 int_word)
{
	u8 isc = (int_word & 0x38000000) >> 27;

	return (0x80 >> isc) << 24;
}

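/*
 * Check whether an interrupt of the given type is currently deliverable,
 * i.e. whether the matching class is enabled in the guest PSW system mask
 * and, where applicable, in the relevant control register submask.
 */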
static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
			return 1;
		/* do not fall through: external call has its own submask */
		return 0;
	case KVM_S390_INT_EMERGENCY:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
			return 1;
		return 0;
	case KVM_S390_INT_CLOCK_COMP:
		return ckc_interrupts_enabled(vcpu);
	case KVM_S390_INT_CPU_TIMER:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x400ul)
			return 1;
		return 0;
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_SIGP_SET_PREFIX:
	case KVM_S390_RESTART:
		return 1;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
			return 1;
		return 0;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[6] &
		    int_word_to_isc_bits(inti->io.io_int_word))
			return 1;
		return 0;
	default:
		printk(KERN_WARNING "illegal interrupt type %llx\n",
		       inti->type);
		BUG();
	}
	return 0;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
			  &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_CLOCK_COMP:
	case KVM_S390_INT_CPU_TIMER:
		if (psw_extint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR0;
		break;
	case KVM_S390_SIGP_STOP:
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
		break;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			vcpu->arch.sie_block->ictl |= ICTL_LPSW;
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR14;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_IO_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR6;
		break;
	default:
		BUG();
	}
}

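/*
 * Write a program interrupt to the guest lowcore: the exception-specific
 * fields for the given interruption code, the PER fields if PGM_PER is
 * set, then the interruption and length codes, and finally the PSW swap.
 */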
static int __deliver_prog_irq(struct kvm_vcpu *vcpu,
			      struct kvm_s390_pgm_info *pgm_info)
{
	const unsigned short table[] = { 2, 4, 4, 6 };
	int rc = 0;

	switch (pgm_info->code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info->exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info->exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info->op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info->mon_class_nr,
				  (u64 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info->mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info->data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info->exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	}

	if (pgm_info->code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info->per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info->per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info->per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info->per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_INST:
	case ICPT_INSTPROGI:
	case ICPT_OPEREXC:
	case ICPT_PARTEXEC:
	case ICPT_IOINST:
		/* last instruction only stored for these icptcodes */
		rc |= put_guest_lc(vcpu, table[vcpu->arch.sie_block->ipa >> 14],
				   (u16 *) __LC_PGM_ILC);
		break;
	case ICPT_PROGI:
		rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->pgmilc,
				   (u16 *) __LC_PGM_ILC);
		break;
	default:
		rc |= put_guest_lc(vcpu, 0,
				   (u16 *) __LC_PGM_ILC);
	}

	rc |= put_guest_lc(vcpu, pgm_info->code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));

	return rc;
}

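/*
 * Deliver one interrupt to the guest: store the type-specific parameters
 * in the lowcore, save the old PSW and load the new PSW of the matching
 * interruption class. A lowcore access failure is fatal for userspace.
 */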
static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
				   struct kvm_s390_interrupt_info *inti)
{
	const unsigned short table[] = { 2, 4, 4, 6 };
	int rc = 0;

	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
		vcpu->stat.deliver_emergency_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->emerg.code, 0);
		rc = put_guest_lc(vcpu, 0x1201, (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, inti->emerg.code,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
		vcpu->stat.deliver_external_call++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->extcall.code, 0);
		rc = put_guest_lc(vcpu, 0x1202, (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, inti->extcall.code,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		break;
	case KVM_S390_INT_CLOCK_COMP:
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->ext.ext_params, 0);
		deliver_ckc_interrupt(vcpu);
		break;
	case KVM_S390_INT_CPU_TIMER:
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->ext.ext_params, 0);
		rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
				  (u16 *)__LC_EXT_INT_CODE);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		break;
	case KVM_S390_INT_SERVICE:
		VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
			   inti->ext.ext_params);
		vcpu->stat.deliver_service_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->ext.ext_params, 0);
		rc = put_guest_lc(vcpu, 0x2401, (u16 *)__LC_EXT_INT_CODE);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		break;
	case KVM_S390_INT_PFAULT_INIT:
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
						 inti->ext.ext_params2);
		rc = put_guest_lc(vcpu, 0x2603, (u16 *) __LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, 0x0600, (u16 *) __LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *) __LC_EXT_PARAMS2);
		break;
	case KVM_S390_INT_PFAULT_DONE:
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
						 inti->ext.ext_params2);
		rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, 0x0680, (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		break;
	case KVM_S390_INT_VIRTIO:
		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->ext.ext_params,
						 inti->ext.ext_params2);
		rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, 0x0d00, (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		break;
	case KVM_S390_SIGP_STOP:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
		vcpu->stat.deliver_stop_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 0, 0);
		__set_intercept_indicator(vcpu, inti);
		break;

	case KVM_S390_SIGP_SET_PREFIX:
		VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
			   inti->prefix.address);
		vcpu->stat.deliver_prefix_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->prefix.address, 0);
		kvm_s390_set_prefix(vcpu, inti->prefix.address);
		break;

	case KVM_S390_RESTART:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
		vcpu->stat.deliver_restart_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 0, 0);
		rc = write_guest_lc(vcpu,
				    offsetof(struct _lowcore, restart_old_psw),
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		break;
	case KVM_S390_PROGRAM_INT:
		VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
			   inti->pgm.code,
			   table[vcpu->arch.sie_block->ipa >> 14]);
		vcpu->stat.deliver_program_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->pgm.code, 0);
		rc = __deliver_prog_irq(vcpu, &inti->pgm);
		break;

	case KVM_S390_MCHK:
		VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
			   inti->mchk.mcic);
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->mchk.cr14,
						 inti->mchk.mcic);
		rc = kvm_s390_vcpu_store_status(vcpu,
						KVM_S390_STORE_STATUS_PREFIXED);
		rc |= put_guest_lc(vcpu, inti->mchk.mcic, (u64 *)__LC_MCCK_CODE);
		rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		break;

	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	{
		__u32 param0 = ((__u32)inti->io.subchannel_id << 16) |
			inti->io.subchannel_nr;
		__u64 param1 = ((__u64)inti->io.io_int_parm << 32) |
			inti->io.io_int_word;
		VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
		vcpu->stat.deliver_io_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 param0, param1);
		rc = put_guest_lc(vcpu, inti->io.subchannel_id,
				  (u16 *)__LC_SUBCHANNEL_ID);
		rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
				   (u16 *)__LC_SUBCHANNEL_NR);
		rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
				   (u32 *)__LC_IO_INT_PARM);
		rc |= put_guest_lc(vcpu, inti->io.io_int_word,
				   (u32 *)__LC_IO_INT_WORD);
		rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		break;
	}
	default:
		BUG();
	}
	if (rc) {
		printk("kvm: The guest lowcore is not mapped during interrupt "
		       "delivery, killing userspace\n");
		do_exit(SIGKILL);
	}
}

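/* Deliver a clock comparator interrupt (external interrupt code 0x1004). */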
static void deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
	int rc;

	rc = put_guest_lc(vcpu, 0x1004, (u16 __user *)__LC_EXT_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	if (rc) {
		printk("kvm: The guest lowcore is not mapped during interrupt "
		       "delivery, killing userspace\n");
		do_exit(SIGKILL);
	}
}

/* Check whether SIGP interpretation facility has an external call pending */
int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu)
{
	atomic_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl;

	if (!psw_extint_disabled(vcpu) &&
	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul) &&
	    (atomic_read(sigp_ctrl) & SIGP_CTRL_C) &&
	    (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
		return 1;

	return 0;
}

int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	if (atomic_read(&li->active)) {
		spin_lock_bh(&li->lock);
		list_for_each_entry(inti, &li->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock_bh(&li->lock);
	}

	if ((!rc) && atomic_read(&fi->active)) {
		spin_lock(&fi->lock);
		list_for_each_entry(inti, &fi->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock(&fi->lock);
	}

	if (!rc && kvm_cpu_has_pending_timer(vcpu))
		rc = 1;

	if (!rc && kvm_s390_si_ext_call_pending(vcpu))
		rc = 1;

	return rc;
}

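/*
 * A timer interrupt is pending once the guest's clock comparator value
 * has been reached and clock comparator interrupts are enabled.
 */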
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.sie_block->ckc <
	      get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
		return 0;
	if (!ckc_interrupts_enabled(vcpu))
		return 0;
	return 1;
}

int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;
	DECLARE_WAITQUEUE(wait, current);

	vcpu->stat.exit_wait_state++;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;

	__set_cpu_idle(vcpu);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 0;
	spin_unlock_bh(&vcpu->arch.local_int.lock);

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		__unset_cpu_idle(vcpu);
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (!ckc_interrupts_enabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		goto no_timer;
	}

	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
	if (vcpu->arch.sie_block->ckc < now) {
		__unset_cpu_idle(vcpu);
		return 0;
	}

	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime),
		      HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	spin_lock(&vcpu->arch.local_int.float_int->lock);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	add_wait_queue(&vcpu->wq, &wait);
	while (list_empty(&vcpu->arch.local_int.list) &&
	       list_empty(&vcpu->arch.local_int.float_int->list) &&
	       (!vcpu->arch.local_int.timer_due) &&
	       !signal_pending(current) &&
	       !kvm_s390_si_ext_call_pending(vcpu)) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&vcpu->arch.local_int.lock);
		spin_unlock(&vcpu->arch.local_int.float_int->lock);
		schedule();
		spin_lock(&vcpu->arch.local_int.float_int->lock);
		spin_lock_bh(&vcpu->arch.local_int.lock);
	}
	__unset_cpu_idle(vcpu);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->wq, &wait);
	spin_unlock_bh(&vcpu->arch.local_int.lock);
	spin_unlock(&vcpu->arch.local_int.float_int->lock);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
	return 0;
}

void kvm_s390_tasklet(unsigned long parm)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;

	spin_lock(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 1;
	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);
	spin_unlock(&vcpu->arch.local_int.lock);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	vcpu->preempted = true;
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

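/*
 * Free all interrupts queued on the local list of this vcpu and clear
 * any external call that was set up by the SIGP interpretation facility.
 */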
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *n, *inti = NULL;

	spin_lock_bh(&li->lock);
	list_for_each_entry_safe(inti, n, &li->list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
	atomic_set(&li->active, 0);
	spin_unlock_bh(&li->lock);

	/* clear pending external calls set by sigp interpretation facility */
	atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
	atomic_clear_mask(SIGP_CTRL_C,
			  &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl);
}

void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *n, *inti = NULL;
	int deliver;

	__reset_intercept_indicators(vcpu);
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock_bh(&li->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}

	if (kvm_cpu_has_pending_timer(vcpu))
		deliver_ckc_interrupt(vcpu);

	if (atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					fi->irq_count--;
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock(&fi->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}
}

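/*
 * Same as kvm_s390_deliver_pending_interrupts(), but restricted to
 * machine check interrupts; all other pending types only get their
 * intercept indicators set.
 */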
void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *n, *inti = NULL;
	int deliver;

	__reset_intercept_indicators(vcpu);
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if ((inti->type == KVM_S390_MCHK) &&
				    __interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock_bh(&li->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}

	if (atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if ((inti->type == KVM_S390_MCHK) &&
				    __interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					fi->irq_count--;
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock(&fi->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}
}

int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_PROGRAM_INT;
	inti->pgm.code = code;

	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
	spin_lock_bh(&li->lock);
	list_add(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	BUG_ON(waitqueue_active(li->wq));
	spin_unlock_bh(&li->lock);
	return 0;
}

int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
			     struct kvm_s390_pgm_info *pgm_info)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)",
		   pgm_info->code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
				   pgm_info->code, 0, 1);

	inti->type = KVM_S390_PROGRAM_INT;
	memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm));
	spin_lock_bh(&li->lock);
	list_add(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	BUG_ON(waitqueue_active(li->wq));
	spin_unlock_bh(&li->lock);
	return 0;
}

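/*
 * Dequeue and return the first floating I/O interrupt that matches the
 * given selector: either an isc mask in cr6 or a subchannel id (schid),
 * exactly one of which must be non-zero.
 */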
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 cr6, u64 schid)
{
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti, *iter;

	if ((!schid && !cr6) || (schid && cr6))
		return NULL;
	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	inti = NULL;
	list_for_each_entry(iter, &fi->list, list) {
		if (!is_ioint(iter->type))
			continue;
		if (cr6 &&
		    ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
			continue;
		if (schid) {
			if (((schid & 0x00000000ffff0000) >> 16) !=
			    iter->io.subchannel_id)
				continue;
			if ((schid & 0x000000000000ffff) !=
			    iter->io.subchannel_nr)
				continue;
		}
		inti = iter;
		break;
	}
	if (inti) {
		list_del_init(&inti->list);
		fi->irq_count--;
	}
	if (list_empty(&fi->list))
		atomic_set(&fi->active, 0);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return inti;
}

static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *iter;
	struct kvm_vcpu *dst_vcpu = NULL;
	int sigcpu;
	int rc = 0;

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
		rc = -EINVAL;
		goto unlock_fi;
	}
	fi->irq_count++;
	if (!is_ioint(inti->type)) {
		list_add_tail(&inti->list, &fi->list);
	} else {
		u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);

		/* Keep I/O interrupts sorted in isc order. */
		list_for_each_entry(iter, &fi->list, list) {
			if (!is_ioint(iter->type))
				continue;
			if (int_word_to_isc_bits(iter->io.io_int_word)
			    <= isc_bits)
				continue;
			break;
		}
		list_add_tail(&inti->list, &iter->list);
	}
	atomic_set(&fi->active, 1);
	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
	if (sigcpu == KVM_MAX_VCPUS) {
		do {
			sigcpu = fi->next_rr_cpu++;
			if (sigcpu == KVM_MAX_VCPUS)
				sigcpu = fi->next_rr_cpu = 0;
		} while (kvm_get_vcpu(kvm, sigcpu) == NULL);
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
	li = &dst_vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	kvm_get_vcpu(kvm, sigcpu)->preempted = true;
	spin_unlock_bh(&li->lock);
unlock_fi:
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return rc;
}

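/*
 * Translate a kvm_s390_interrupt passed in by userspace or another
 * kernel subsystem into a floating interrupt and queue it on the vm.
 */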
int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = s390int->type;
	switch (inti->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_INT_PFAULT_DONE:
		inti->type = s390int->type;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_MCHK:
		VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
			 s390int->parm64);
		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (inti->type & IOINT_AI_MASK)
			VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
		else
			VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
				 s390int->type & IOINT_CSSID_MASK,
				 s390int->type & IOINT_SSID_MASK,
				 s390int->type & IOINT_SCHID_MASK);
		inti->io.subchannel_id = s390int->parm >> 16;
		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
		inti->io.io_int_parm = s390int->parm64 >> 32;
		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
		break;
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);

	return __inject_vm(kvm, inti);
}

void kvm_s390_reinject_io_int(struct kvm *kvm,
			      struct kvm_s390_interrupt_info *inti)
{
	__inject_vm(kvm, inti);
}

int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
			 struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		inti->type = s390int->type;
		inti->pgm.code = s390int->parm;
		VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		inti->prefix.address = s390int->parm;
		inti->type = s390int->type;
		VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_STOP:
	case KVM_S390_RESTART:
	case KVM_S390_INT_CLOCK_COMP:
	case KVM_S390_INT_CPU_TIMER:
		VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
		inti->type = s390int->type;
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
			   s390int->parm);
		inti->type = s390int->type;
		inti->extcall.code = s390int->parm;
		break;
	case KVM_S390_INT_EMERGENCY:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", s390int->parm);
		inti->type = s390int->type;
		inti->emerg.code = s390int->parm;
		break;
	case KVM_S390_MCHK:
		VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
			   s390int->parm64);
		inti->type = s390int->type;
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_PFAULT_INIT:
		inti->type = s390int->type;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type, s390int->parm,
				   s390int->parm64, 2);

	mutex_lock(&vcpu->kvm->lock);
	li = &vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	if (inti->type == KVM_S390_PROGRAM_INT)
		list_add(&inti->list, &li->list);
	else
		list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (inti->type == KVM_S390_SIGP_STOP)
		li->action_bits |= ACTION_STOP_ON_STOP;
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);
	vcpu->preempted = true;
	spin_unlock_bh(&li->lock);
	mutex_unlock(&vcpu->kvm->lock);
	return 0;
}

void kvm_s390_clear_float_irqs(struct kvm *kvm)
{
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *n, *inti = NULL;

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	list_for_each_entry_safe(inti, n, &fi->list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
	fi->irq_count = 0;
	atomic_set(&fi->active, 0);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
}

static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
				   u8 *addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	struct kvm_s390_irq irq = {0};

	irq.type = inti->type;
	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		irq.u.ext = inti->ext;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		irq.u.io = inti->io;
		break;
	case KVM_S390_MCHK:
		irq.u.mchk = inti->mchk;
		break;
	default:
		return -EINVAL;
	}

	if (copy_to_user(uptr, &irq, sizeof(irq)))
		return -EFAULT;

	return 0;
}

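/*
 * Copy all currently pending floating interrupts to the userspace buffer
 * buf of size len; returns the number of interrupts copied, or -ENOMEM
 * if the buffer is too small so that userspace can retry.
 */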
static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
{
	struct kvm_s390_interrupt_info *inti;
	struct kvm_s390_float_interrupt *fi;
	int ret = 0;
	int n = 0;

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);

	list_for_each_entry(inti, &fi->list, list) {
		if (len < sizeof(struct kvm_s390_irq)) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			break;
		}
		ret = copy_irq_to_user(inti, buf);
		if (ret)
			break;
		buf += sizeof(struct kvm_s390_irq);
		len -= sizeof(struct kvm_s390_irq);
		n++;
	}

	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);

	return ret < 0 ? ret : n;
}

static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
		r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
					  attr->attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
				     u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	void *target = NULL;
	void __user *source;
	u64 size;

	if (get_user(inti->type, (u64 __user *)addr))
		return -EFAULT;

	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		target = (void *) &inti->ext;
		source = &uptr->u.ext;
		size = sizeof(inti->ext);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		target = (void *) &inti->io;
		source = &uptr->u.io;
		size = sizeof(inti->io);
		break;
	case KVM_S390_MCHK:
		target = (void *) &inti->mchk;
		source = &uptr->u.mchk;
		size = sizeof(inti->mchk);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(target, source, size))
		return -EFAULT;

	return 0;
}

static int enqueue_floating_irq(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int r = 0;
	int len = attr->attr;

	if (len % sizeof(struct kvm_s390_irq) != 0)
		return -EINVAL;
	else if (len > KVM_S390_FLIC_MAX_BUFFER)
		return -EINVAL;

	while (len >= sizeof(struct kvm_s390_irq)) {
		inti = kzalloc(sizeof(*inti), GFP_KERNEL);
		if (!inti)
			return -ENOMEM;

		r = copy_irq_from_user(inti, attr->addr);
		if (r) {
			kfree(inti);
			return r;
		}
		r = __inject_vm(dev->kvm, inti);
		if (r) {
			kfree(inti);
			return r;
		}
		len -= sizeof(struct kvm_s390_irq);
		attr->addr += sizeof(struct kvm_s390_irq);
	}

	return r;
}

static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
	if (id >= MAX_S390_IO_ADAPTERS)
		return NULL;
	return kvm->arch.adapters[id];
}

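/*
 * Register a new I/O adapter from the kvm_s390_io_adapter description
 * passed in by userspace; fails if the id is out of range or in use.
 */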
static int register_io_adapter(struct kvm_device *dev,
			       struct kvm_device_attr *attr)
{
	struct s390_io_adapter *adapter;
	struct kvm_s390_io_adapter adapter_info;

	if (copy_from_user(&adapter_info,
			   (void __user *)attr->addr, sizeof(adapter_info)))
		return -EFAULT;

	if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
	    (dev->kvm->arch.adapters[adapter_info.id] != NULL))
		return -EINVAL;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	INIT_LIST_HEAD(&adapter->maps);
	init_rwsem(&adapter->maps_lock);
	atomic_set(&adapter->nr_maps, 0);
	adapter->id = adapter_info.id;
	adapter->isc = adapter_info.isc;
	adapter->maskable = adapter_info.maskable;
	adapter->masked = false;
	adapter->swap = adapter_info.swap;
	dev->kvm->arch.adapters[adapter->id] = adapter;

	return 0;
}

int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
	int ret;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter || !adapter->maskable)
		return -EINVAL;
	ret = adapter->masked;
	adapter->masked = masked;
	return ret;
}

static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map;
	int ret;

	if (!adapter || !addr)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&map->list);
	map->guest_addr = addr;
	map->addr = gmap_translate(addr, kvm->arch.gmap);
	if (map->addr == -EFAULT) {
		ret = -EFAULT;
		goto out;
	}
	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
	if (ret < 0)
		goto out;
	BUG_ON(ret != 1);
	down_write(&adapter->maps_lock);
	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
		list_add_tail(&map->list, &adapter->maps);
		ret = 0;
	} else {
		put_page(map->page);
		ret = -EINVAL;
	}
	up_write(&adapter->maps_lock);
out:
	if (ret)
		kfree(map);
	return ret;
}

static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map, *tmp;
	int found = 0;

	if (!adapter || !addr)
		return -EINVAL;

	down_write(&adapter->maps_lock);
	list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
		if (map->guest_addr == addr) {
			found = 1;
			atomic_dec(&adapter->nr_maps);
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
			break;
		}
	}
	up_write(&adapter->maps_lock);

	return found ? 0 : -EINVAL;
}

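/* Release all adapter page mappings and the adapters themselves. */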
void kvm_s390_destroy_adapters(struct kvm *kvm)
{
	int i;
	struct s390_map_info *map, *tmp;

	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
		if (!kvm->arch.adapters[i])
			continue;
		list_for_each_entry_safe(map, tmp,
					 &kvm->arch.adapters[i]->maps, list) {
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
		}
		kfree(kvm->arch.adapters[i]);
	}
}

static int modify_io_adapter(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct kvm_s390_io_adapter_req req;
	struct s390_io_adapter *adapter;
	int ret;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	adapter = get_io_adapter(dev->kvm, req.id);
	if (!adapter)
		return -EINVAL;
	switch (req.type) {
	case KVM_S390_IO_ADAPTER_MASK:
		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
		if (ret > 0)
			ret = 0;
		break;
	case KVM_S390_IO_ADAPTER_MAP:
		ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
		break;
	case KVM_S390_IO_ADAPTER_UNMAP:
		ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;
	unsigned int i;
	struct kvm_vcpu *vcpu;

	switch (attr->group) {
	case KVM_DEV_FLIC_ENQUEUE:
		r = enqueue_floating_irq(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IRQS:
		r = 0;
		kvm_s390_clear_float_irqs(dev->kvm);
		break;
	case KVM_DEV_FLIC_APF_ENABLE:
		dev->kvm->arch.gmap->pfault_enabled = 1;
		break;
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
		dev->kvm->arch.gmap->pfault_enabled = 0;
		/*
		 * Make sure no async faults are in transition when
		 * clearing the queues. So we don't need to worry
		 * about late coming workers.
		 */
		synchronize_srcu(&dev->kvm->srcu);
		kvm_for_each_vcpu(i, vcpu, dev->kvm)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
		r = register_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
		r = modify_io_adapter(dev, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static int flic_create(struct kvm_device *dev, u32 type)
{
	if (!dev)
		return -EINVAL;
	if (dev->kvm->arch.flic)
		return -EINVAL;
	dev->kvm->arch.flic = dev;
	return 0;
}

static void flic_destroy(struct kvm_device *dev)
{
	dev->kvm->arch.flic = NULL;
	kfree(dev);
}

/* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,
	.set_attr = flic_set_attr,
	.create = flic_create,
	.destroy = flic_destroy,
};

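/*
 * Calculate the bit position of an adapter indicator within its page;
 * for swapped (big endian bit numbering) adapters the bit number is
 * mirrored within the machine word.
 */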
static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
{
	unsigned long bit;

	bit = bit_nr + (addr % PAGE_SIZE) * 8;

	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}

static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
					  u64 addr)
{
	struct s390_map_info *map;

	if (!adapter)
		return NULL;

	list_for_each_entry(map, &adapter->maps, list) {
		if (map->guest_addr == addr)
			return map;
	}
	return NULL;
}

static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct s390_map_info *info;
	void *map;

	info = get_map_info(adapter, adapter_int->ind_addr);
	if (!info)
		return -1;
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	idx = srcu_read_lock(&kvm->srcu);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	info = get_map_info(adapter, adapter_int->summary_addr);
	if (!info) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -1;
	}
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
			  adapter->swap);
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	srcu_read_unlock(&kvm->srcu, idx);
	return summary_set ? 0 : 1;
}

/*
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	int ret;
	struct s390_io_adapter *adapter;

	/* We're only interested in the 0->1 transition. */
	if (!level)
		return 0;
	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
	if (!adapter)
		return -1;
	down_read(&adapter->maps_lock);
	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
	up_read(&adapter->maps_lock);
	if ((ret > 0) && !adapter->masked) {
		struct kvm_s390_interrupt s390int = {
			.type = KVM_S390_INT_IO(1, 0, 0, 0),
			.parm = 0,
			.parm64 = (adapter->isc << 27) | 0x80000000,
		};
		ret = kvm_s390_inject_vm(kvm, &s390int);
		if (ret == 0)
			ret = 1;
	}
	return ret;
}

int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int ret;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_S390_ADAPTER:
		e->set = set_adapter_int;
		e->adapter.summary_addr = ue->u.adapter.summary_addr;
		e->adapter.ind_addr = ue->u.adapter.ind_addr;
		e->adapter.summary_offset = ue->u.adapter.summary_offset;
		e->adapter.ind_offset = ue->u.adapter.ind_offset;
		e->adapter.adapter_id = ue->u.adapter.adapter_id;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	return -EINVAL;
}