1 /* 2 * handling kvm guest interrupts 3 * 4 * Copyright IBM Corp. 2008,2014 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License (version 2 only) 8 * as published by the Free Software Foundation. 9 * 10 * Author(s): Carsten Otte <cotte@de.ibm.com> 11 */ 12 13 #include <linux/interrupt.h> 14 #include <linux/kvm_host.h> 15 #include <linux/hrtimer.h> 16 #include <linux/mmu_context.h> 17 #include <linux/signal.h> 18 #include <linux/slab.h> 19 #include <asm/asm-offsets.h> 20 #include <asm/uaccess.h> 21 #include "kvm-s390.h" 22 #include "gaccess.h" 23 #include "trace-s390.h" 24 25 #define IOINT_SCHID_MASK 0x0000ffff 26 #define IOINT_SSID_MASK 0x00030000 27 #define IOINT_CSSID_MASK 0x03fc0000 28 #define IOINT_AI_MASK 0x04000000 29 30 static void deliver_ckc_interrupt(struct kvm_vcpu *vcpu); 31 32 static int is_ioint(u64 type) 33 { 34 return ((type & 0xfffe0000u) != 0xfffe0000u); 35 } 36 37 int psw_extint_disabled(struct kvm_vcpu *vcpu) 38 { 39 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT); 40 } 41 42 static int psw_ioint_disabled(struct kvm_vcpu *vcpu) 43 { 44 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO); 45 } 46 47 static int psw_mchk_disabled(struct kvm_vcpu *vcpu) 48 { 49 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK); 50 } 51 52 static int psw_interrupts_disabled(struct kvm_vcpu *vcpu) 53 { 54 if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) || 55 (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) || 56 (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT)) 57 return 0; 58 return 1; 59 } 60 61 static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu) 62 { 63 if (psw_extint_disabled(vcpu) || 64 !(vcpu->arch.sie_block->gcr[0] & 0x800ul)) 65 return 0; 66 if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu)) 67 /* No timer interrupts when single stepping */ 68 return 0; 69 return 1; 70 } 71 72 static u64 int_word_to_isc_bits(u32 int_word) 73 { 74 u8 isc = (int_word & 0x38000000) >> 27; 75 76 return (0x80 >> isc) << 24; 77 } 78 79 static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu, 80 struct kvm_s390_interrupt_info *inti) 81 { 82 switch (inti->type) { 83 case KVM_S390_INT_EXTERNAL_CALL: 84 if (psw_extint_disabled(vcpu)) 85 return 0; 86 if (vcpu->arch.sie_block->gcr[0] & 0x2000ul) 87 return 1; 88 case KVM_S390_INT_EMERGENCY: 89 if (psw_extint_disabled(vcpu)) 90 return 0; 91 if (vcpu->arch.sie_block->gcr[0] & 0x4000ul) 92 return 1; 93 return 0; 94 case KVM_S390_INT_CLOCK_COMP: 95 return ckc_interrupts_enabled(vcpu); 96 case KVM_S390_INT_CPU_TIMER: 97 if (psw_extint_disabled(vcpu)) 98 return 0; 99 if (vcpu->arch.sie_block->gcr[0] & 0x400ul) 100 return 1; 101 return 0; 102 case KVM_S390_INT_SERVICE: 103 case KVM_S390_INT_PFAULT_INIT: 104 case KVM_S390_INT_PFAULT_DONE: 105 case KVM_S390_INT_VIRTIO: 106 if (psw_extint_disabled(vcpu)) 107 return 0; 108 if (vcpu->arch.sie_block->gcr[0] & 0x200ul) 109 return 1; 110 return 0; 111 case KVM_S390_PROGRAM_INT: 112 case KVM_S390_SIGP_STOP: 113 case KVM_S390_SIGP_SET_PREFIX: 114 case KVM_S390_RESTART: 115 return 1; 116 case KVM_S390_MCHK: 117 if (psw_mchk_disabled(vcpu)) 118 return 0; 119 if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14) 120 return 1; 121 return 0; 122 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 123 if (psw_ioint_disabled(vcpu)) 124 return 0; 125 if (vcpu->arch.sie_block->gcr[6] & 126 int_word_to_isc_bits(inti->io.io_int_word)) 127 return 1; 128 return 0; 129 default: 130 printk(KERN_WARNING "illegal interrupt type %llx\n", 131 inti->type); 132 BUG(); 133 } 134 return 0; 135 } 136 137 static void __set_cpu_idle(struct kvm_vcpu *vcpu) 138 { 139 atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); 140 set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); 141 } 142 143 static void __unset_cpu_idle(struct kvm_vcpu *vcpu) 144 { 145 atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); 146 clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); 147 } 148 149 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) 150 { 151 atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT, 152 &vcpu->arch.sie_block->cpuflags); 153 vcpu->arch.sie_block->lctl = 0x0000; 154 vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT); 155 156 if (guestdbg_enabled(vcpu)) { 157 vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 | 158 LCTL_CR10 | LCTL_CR11); 159 vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT); 160 } 161 } 162 163 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) 164 { 165 atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags); 166 } 167 168 static void __set_intercept_indicator(struct kvm_vcpu *vcpu, 169 struct kvm_s390_interrupt_info *inti) 170 { 171 switch (inti->type) { 172 case KVM_S390_INT_EXTERNAL_CALL: 173 case KVM_S390_INT_EMERGENCY: 174 case KVM_S390_INT_SERVICE: 175 case KVM_S390_INT_PFAULT_INIT: 176 case KVM_S390_INT_PFAULT_DONE: 177 case KVM_S390_INT_VIRTIO: 178 case KVM_S390_INT_CLOCK_COMP: 179 case KVM_S390_INT_CPU_TIMER: 180 if (psw_extint_disabled(vcpu)) 181 __set_cpuflag(vcpu, CPUSTAT_EXT_INT); 182 else 183 vcpu->arch.sie_block->lctl |= LCTL_CR0; 184 break; 185 case KVM_S390_SIGP_STOP: 186 __set_cpuflag(vcpu, CPUSTAT_STOP_INT); 187 break; 188 case KVM_S390_MCHK: 189 if (psw_mchk_disabled(vcpu)) 190 vcpu->arch.sie_block->ictl |= ICTL_LPSW; 191 else 192 vcpu->arch.sie_block->lctl |= LCTL_CR14; 193 break; 194 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 195 if (psw_ioint_disabled(vcpu)) 196 __set_cpuflag(vcpu, CPUSTAT_IO_INT); 197 else 198 vcpu->arch.sie_block->lctl |= LCTL_CR6; 199 break; 200 default: 201 BUG(); 202 } 203 } 204 205 static int __deliver_prog_irq(struct kvm_vcpu *vcpu, 206 struct kvm_s390_pgm_info *pgm_info) 207 { 208 const unsigned short table[] = { 2, 4, 4, 6 }; 209 int rc = 0; 210 211 switch (pgm_info->code & ~PGM_PER) { 212 case PGM_AFX_TRANSLATION: 213 case PGM_ASX_TRANSLATION: 214 case PGM_EX_TRANSLATION: 215 case PGM_LFX_TRANSLATION: 216 case PGM_LSTE_SEQUENCE: 217 case PGM_LSX_TRANSLATION: 218 case PGM_LX_TRANSLATION: 219 case PGM_PRIMARY_AUTHORITY: 220 case PGM_SECONDARY_AUTHORITY: 221 case PGM_SPACE_SWITCH: 222 rc = put_guest_lc(vcpu, pgm_info->trans_exc_code, 223 (u64 *)__LC_TRANS_EXC_CODE); 224 break; 225 case PGM_ALEN_TRANSLATION: 226 case PGM_ALE_SEQUENCE: 227 case PGM_ASTE_INSTANCE: 228 case PGM_ASTE_SEQUENCE: 229 case PGM_ASTE_VALIDITY: 230 case PGM_EXTENDED_AUTHORITY: 231 rc = put_guest_lc(vcpu, pgm_info->exc_access_id, 232 (u8 *)__LC_EXC_ACCESS_ID); 233 break; 234 case PGM_ASCE_TYPE: 235 case PGM_PAGE_TRANSLATION: 236 case PGM_REGION_FIRST_TRANS: 237 case PGM_REGION_SECOND_TRANS: 238 case PGM_REGION_THIRD_TRANS: 239 case PGM_SEGMENT_TRANSLATION: 240 rc = put_guest_lc(vcpu, pgm_info->trans_exc_code, 241 (u64 *)__LC_TRANS_EXC_CODE); 242 rc |= put_guest_lc(vcpu, pgm_info->exc_access_id, 243 (u8 *)__LC_EXC_ACCESS_ID); 244 rc |= put_guest_lc(vcpu, pgm_info->op_access_id, 245 (u8 *)__LC_OP_ACCESS_ID); 246 break; 247 case PGM_MONITOR: 248 rc = put_guest_lc(vcpu, pgm_info->mon_class_nr, 249 (u64 *)__LC_MON_CLASS_NR); 250 rc |= put_guest_lc(vcpu, pgm_info->mon_code, 251 (u64 *)__LC_MON_CODE); 252 break; 253 case PGM_DATA: 254 rc = put_guest_lc(vcpu, pgm_info->data_exc_code, 255 (u32 *)__LC_DATA_EXC_CODE); 256 break; 257 case PGM_PROTECTION: 258 rc = put_guest_lc(vcpu, pgm_info->trans_exc_code, 259 (u64 *)__LC_TRANS_EXC_CODE); 260 rc |= put_guest_lc(vcpu, pgm_info->exc_access_id, 261 (u8 *)__LC_EXC_ACCESS_ID); 262 break; 263 } 264 265 if (pgm_info->code & PGM_PER) { 266 rc |= put_guest_lc(vcpu, pgm_info->per_code, 267 (u8 *) __LC_PER_CODE); 268 rc |= put_guest_lc(vcpu, pgm_info->per_atmid, 269 (u8 *)__LC_PER_ATMID); 270 rc |= put_guest_lc(vcpu, pgm_info->per_address, 271 (u64 *) __LC_PER_ADDRESS); 272 rc |= put_guest_lc(vcpu, pgm_info->per_access_id, 273 (u8 *) __LC_PER_ACCESS_ID); 274 } 275 276 switch (vcpu->arch.sie_block->icptcode) { 277 case ICPT_INST: 278 case ICPT_INSTPROGI: 279 case ICPT_OPEREXC: 280 case ICPT_PARTEXEC: 281 case ICPT_IOINST: 282 /* last instruction only stored for these icptcodes */ 283 rc |= put_guest_lc(vcpu, table[vcpu->arch.sie_block->ipa >> 14], 284 (u16 *) __LC_PGM_ILC); 285 break; 286 case ICPT_PROGI: 287 rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->pgmilc, 288 (u16 *) __LC_PGM_ILC); 289 break; 290 default: 291 rc |= put_guest_lc(vcpu, 0, 292 (u16 *) __LC_PGM_ILC); 293 } 294 295 rc |= put_guest_lc(vcpu, pgm_info->code, 296 (u16 *)__LC_PGM_INT_CODE); 297 rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW, 298 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 299 rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW, 300 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 301 302 return rc; 303 } 304 305 static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, 306 struct kvm_s390_interrupt_info *inti) 307 { 308 const unsigned short table[] = { 2, 4, 4, 6 }; 309 int rc = 0; 310 311 switch (inti->type) { 312 case KVM_S390_INT_EMERGENCY: 313 VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg"); 314 vcpu->stat.deliver_emergency_signal++; 315 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 316 inti->emerg.code, 0); 317 rc = put_guest_lc(vcpu, 0x1201, (u16 *)__LC_EXT_INT_CODE); 318 rc |= put_guest_lc(vcpu, inti->emerg.code, 319 (u16 *)__LC_EXT_CPU_ADDR); 320 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 321 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 322 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 323 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 324 break; 325 case KVM_S390_INT_EXTERNAL_CALL: 326 VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call"); 327 vcpu->stat.deliver_external_call++; 328 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 329 inti->extcall.code, 0); 330 rc = put_guest_lc(vcpu, 0x1202, (u16 *)__LC_EXT_INT_CODE); 331 rc |= put_guest_lc(vcpu, inti->extcall.code, 332 (u16 *)__LC_EXT_CPU_ADDR); 333 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 334 &vcpu->arch.sie_block->gpsw, 335 sizeof(psw_t)); 336 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 337 &vcpu->arch.sie_block->gpsw, 338 sizeof(psw_t)); 339 break; 340 case KVM_S390_INT_CLOCK_COMP: 341 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 342 inti->ext.ext_params, 0); 343 deliver_ckc_interrupt(vcpu); 344 break; 345 case KVM_S390_INT_CPU_TIMER: 346 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 347 inti->ext.ext_params, 0); 348 rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER, 349 (u16 *)__LC_EXT_INT_CODE); 350 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 351 &vcpu->arch.sie_block->gpsw, 352 sizeof(psw_t)); 353 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 354 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 355 rc |= put_guest_lc(vcpu, inti->ext.ext_params, 356 (u32 *)__LC_EXT_PARAMS); 357 break; 358 case KVM_S390_INT_SERVICE: 359 VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x", 360 inti->ext.ext_params); 361 vcpu->stat.deliver_service_signal++; 362 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 363 inti->ext.ext_params, 0); 364 rc = put_guest_lc(vcpu, 0x2401, (u16 *)__LC_EXT_INT_CODE); 365 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 366 &vcpu->arch.sie_block->gpsw, 367 sizeof(psw_t)); 368 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 369 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 370 rc |= put_guest_lc(vcpu, inti->ext.ext_params, 371 (u32 *)__LC_EXT_PARAMS); 372 break; 373 case KVM_S390_INT_PFAULT_INIT: 374 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0, 375 inti->ext.ext_params2); 376 rc = put_guest_lc(vcpu, 0x2603, (u16 *) __LC_EXT_INT_CODE); 377 rc |= put_guest_lc(vcpu, 0x0600, (u16 *) __LC_EXT_CPU_ADDR); 378 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 379 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 380 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 381 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 382 rc |= put_guest_lc(vcpu, inti->ext.ext_params2, 383 (u64 *) __LC_EXT_PARAMS2); 384 break; 385 case KVM_S390_INT_PFAULT_DONE: 386 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0, 387 inti->ext.ext_params2); 388 rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE); 389 rc |= put_guest_lc(vcpu, 0x0680, (u16 *)__LC_EXT_CPU_ADDR); 390 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 391 &vcpu->arch.sie_block->gpsw, 392 sizeof(psw_t)); 393 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 394 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 395 rc |= put_guest_lc(vcpu, inti->ext.ext_params2, 396 (u64 *)__LC_EXT_PARAMS2); 397 break; 398 case KVM_S390_INT_VIRTIO: 399 VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx", 400 inti->ext.ext_params, inti->ext.ext_params2); 401 vcpu->stat.deliver_virtio_interrupt++; 402 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 403 inti->ext.ext_params, 404 inti->ext.ext_params2); 405 rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE); 406 rc |= put_guest_lc(vcpu, 0x0d00, (u16 *)__LC_EXT_CPU_ADDR); 407 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 408 &vcpu->arch.sie_block->gpsw, 409 sizeof(psw_t)); 410 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 411 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 412 rc |= put_guest_lc(vcpu, inti->ext.ext_params, 413 (u32 *)__LC_EXT_PARAMS); 414 rc |= put_guest_lc(vcpu, inti->ext.ext_params2, 415 (u64 *)__LC_EXT_PARAMS2); 416 break; 417 case KVM_S390_SIGP_STOP: 418 VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop"); 419 vcpu->stat.deliver_stop_signal++; 420 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 421 0, 0); 422 __set_intercept_indicator(vcpu, inti); 423 break; 424 425 case KVM_S390_SIGP_SET_PREFIX: 426 VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", 427 inti->prefix.address); 428 vcpu->stat.deliver_prefix_signal++; 429 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 430 inti->prefix.address, 0); 431 kvm_s390_set_prefix(vcpu, inti->prefix.address); 432 break; 433 434 case KVM_S390_RESTART: 435 VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart"); 436 vcpu->stat.deliver_restart_signal++; 437 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 438 0, 0); 439 rc = write_guest_lc(vcpu, 440 offsetof(struct _lowcore, restart_old_psw), 441 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 442 rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw), 443 &vcpu->arch.sie_block->gpsw, 444 sizeof(psw_t)); 445 kvm_s390_vcpu_start(vcpu); 446 break; 447 case KVM_S390_PROGRAM_INT: 448 VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x", 449 inti->pgm.code, 450 table[vcpu->arch.sie_block->ipa >> 14]); 451 vcpu->stat.deliver_program_int++; 452 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 453 inti->pgm.code, 0); 454 rc = __deliver_prog_irq(vcpu, &inti->pgm); 455 break; 456 457 case KVM_S390_MCHK: 458 VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx", 459 inti->mchk.mcic); 460 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 461 inti->mchk.cr14, 462 inti->mchk.mcic); 463 rc = kvm_s390_vcpu_store_status(vcpu, 464 KVM_S390_STORE_STATUS_PREFIXED); 465 rc |= put_guest_lc(vcpu, inti->mchk.mcic, (u64 *)__LC_MCCK_CODE); 466 rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, 467 &vcpu->arch.sie_block->gpsw, 468 sizeof(psw_t)); 469 rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, 470 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 471 break; 472 473 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 474 { 475 __u32 param0 = ((__u32)inti->io.subchannel_id << 16) | 476 inti->io.subchannel_nr; 477 __u64 param1 = ((__u64)inti->io.io_int_parm << 32) | 478 inti->io.io_int_word; 479 VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type); 480 vcpu->stat.deliver_io_int++; 481 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 482 param0, param1); 483 rc = put_guest_lc(vcpu, inti->io.subchannel_id, 484 (u16 *)__LC_SUBCHANNEL_ID); 485 rc |= put_guest_lc(vcpu, inti->io.subchannel_nr, 486 (u16 *)__LC_SUBCHANNEL_NR); 487 rc |= put_guest_lc(vcpu, inti->io.io_int_parm, 488 (u32 *)__LC_IO_INT_PARM); 489 rc |= put_guest_lc(vcpu, inti->io.io_int_word, 490 (u32 *)__LC_IO_INT_WORD); 491 rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW, 492 &vcpu->arch.sie_block->gpsw, 493 sizeof(psw_t)); 494 rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW, 495 &vcpu->arch.sie_block->gpsw, 496 sizeof(psw_t)); 497 break; 498 } 499 default: 500 BUG(); 501 } 502 if (rc) { 503 printk("kvm: The guest lowcore is not mapped during interrupt " 504 "delivery, killing userspace\n"); 505 do_exit(SIGKILL); 506 } 507 } 508 509 static void deliver_ckc_interrupt(struct kvm_vcpu *vcpu) 510 { 511 int rc; 512 513 rc = put_guest_lc(vcpu, 0x1004, (u16 __user *)__LC_EXT_INT_CODE); 514 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 515 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 516 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 517 &vcpu->arch.sie_block->gpsw, 518 sizeof(psw_t)); 519 if (rc) { 520 printk("kvm: The guest lowcore is not mapped during interrupt " 521 "delivery, killing userspace\n"); 522 do_exit(SIGKILL); 523 } 524 } 525 526 /* Check whether SIGP interpretation facility has an external call pending */ 527 int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu) 528 { 529 atomic_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl; 530 531 if (!psw_extint_disabled(vcpu) && 532 (vcpu->arch.sie_block->gcr[0] & 0x2000ul) && 533 (atomic_read(sigp_ctrl) & SIGP_CTRL_C) && 534 (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND)) 535 return 1; 536 537 return 0; 538 } 539 540 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) 541 { 542 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 543 struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int; 544 struct kvm_s390_interrupt_info *inti; 545 int rc = 0; 546 547 if (atomic_read(&li->active)) { 548 spin_lock_bh(&li->lock); 549 list_for_each_entry(inti, &li->list, list) 550 if (__interrupt_is_deliverable(vcpu, inti)) { 551 rc = 1; 552 break; 553 } 554 spin_unlock_bh(&li->lock); 555 } 556 557 if ((!rc) && atomic_read(&fi->active)) { 558 spin_lock(&fi->lock); 559 list_for_each_entry(inti, &fi->list, list) 560 if (__interrupt_is_deliverable(vcpu, inti)) { 561 rc = 1; 562 break; 563 } 564 spin_unlock(&fi->lock); 565 } 566 567 if (!rc && kvm_cpu_has_pending_timer(vcpu)) 568 rc = 1; 569 570 if (!rc && kvm_s390_si_ext_call_pending(vcpu)) 571 rc = 1; 572 573 return rc; 574 } 575 576 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) 577 { 578 if (!(vcpu->arch.sie_block->ckc < 579 get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) 580 return 0; 581 if (!ckc_interrupts_enabled(vcpu)) 582 return 0; 583 return 1; 584 } 585 586 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) 587 { 588 u64 now, sltime; 589 DECLARE_WAITQUEUE(wait, current); 590 591 vcpu->stat.exit_wait_state++; 592 if (kvm_cpu_has_interrupt(vcpu)) 593 return 0; 594 595 __set_cpu_idle(vcpu); 596 spin_lock_bh(&vcpu->arch.local_int.lock); 597 vcpu->arch.local_int.timer_due = 0; 598 spin_unlock_bh(&vcpu->arch.local_int.lock); 599 600 if (psw_interrupts_disabled(vcpu)) { 601 VCPU_EVENT(vcpu, 3, "%s", "disabled wait"); 602 __unset_cpu_idle(vcpu); 603 return -EOPNOTSUPP; /* disabled wait */ 604 } 605 606 if (!ckc_interrupts_enabled(vcpu)) { 607 VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer"); 608 goto no_timer; 609 } 610 611 now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch; 612 if (vcpu->arch.sie_block->ckc < now) { 613 __unset_cpu_idle(vcpu); 614 return 0; 615 } 616 617 sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); 618 619 hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL); 620 VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime); 621 no_timer: 622 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 623 spin_lock(&vcpu->arch.local_int.float_int->lock); 624 spin_lock_bh(&vcpu->arch.local_int.lock); 625 add_wait_queue(&vcpu->wq, &wait); 626 while (list_empty(&vcpu->arch.local_int.list) && 627 list_empty(&vcpu->arch.local_int.float_int->list) && 628 (!vcpu->arch.local_int.timer_due) && 629 !signal_pending(current) && 630 !kvm_s390_si_ext_call_pending(vcpu)) { 631 set_current_state(TASK_INTERRUPTIBLE); 632 spin_unlock_bh(&vcpu->arch.local_int.lock); 633 spin_unlock(&vcpu->arch.local_int.float_int->lock); 634 schedule(); 635 spin_lock(&vcpu->arch.local_int.float_int->lock); 636 spin_lock_bh(&vcpu->arch.local_int.lock); 637 } 638 __unset_cpu_idle(vcpu); 639 __set_current_state(TASK_RUNNING); 640 remove_wait_queue(&vcpu->wq, &wait); 641 spin_unlock_bh(&vcpu->arch.local_int.lock); 642 spin_unlock(&vcpu->arch.local_int.float_int->lock); 643 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 644 645 hrtimer_try_to_cancel(&vcpu->arch.ckc_timer); 646 return 0; 647 } 648 649 void kvm_s390_tasklet(unsigned long parm) 650 { 651 struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm; 652 653 spin_lock(&vcpu->arch.local_int.lock); 654 vcpu->arch.local_int.timer_due = 1; 655 if (waitqueue_active(&vcpu->wq)) 656 wake_up_interruptible(&vcpu->wq); 657 spin_unlock(&vcpu->arch.local_int.lock); 658 } 659 660 /* 661 * low level hrtimer wake routine. Because this runs in hardirq context 662 * we schedule a tasklet to do the real work. 663 */ 664 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer) 665 { 666 struct kvm_vcpu *vcpu; 667 668 vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer); 669 vcpu->preempted = true; 670 tasklet_schedule(&vcpu->arch.tasklet); 671 672 return HRTIMER_NORESTART; 673 } 674 675 void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu) 676 { 677 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 678 struct kvm_s390_interrupt_info *n, *inti = NULL; 679 680 spin_lock_bh(&li->lock); 681 list_for_each_entry_safe(inti, n, &li->list, list) { 682 list_del(&inti->list); 683 kfree(inti); 684 } 685 atomic_set(&li->active, 0); 686 spin_unlock_bh(&li->lock); 687 688 /* clear pending external calls set by sigp interpretation facility */ 689 atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); 690 atomic_clear_mask(SIGP_CTRL_C, 691 &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl); 692 } 693 694 void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) 695 { 696 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 697 struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int; 698 struct kvm_s390_interrupt_info *n, *inti = NULL; 699 int deliver; 700 701 __reset_intercept_indicators(vcpu); 702 if (atomic_read(&li->active)) { 703 do { 704 deliver = 0; 705 spin_lock_bh(&li->lock); 706 list_for_each_entry_safe(inti, n, &li->list, list) { 707 if (__interrupt_is_deliverable(vcpu, inti)) { 708 list_del(&inti->list); 709 deliver = 1; 710 break; 711 } 712 __set_intercept_indicator(vcpu, inti); 713 } 714 if (list_empty(&li->list)) 715 atomic_set(&li->active, 0); 716 spin_unlock_bh(&li->lock); 717 if (deliver) { 718 __do_deliver_interrupt(vcpu, inti); 719 kfree(inti); 720 } 721 } while (deliver); 722 } 723 724 if (kvm_cpu_has_pending_timer(vcpu)) 725 deliver_ckc_interrupt(vcpu); 726 727 if (atomic_read(&fi->active)) { 728 do { 729 deliver = 0; 730 spin_lock(&fi->lock); 731 list_for_each_entry_safe(inti, n, &fi->list, list) { 732 if (__interrupt_is_deliverable(vcpu, inti)) { 733 list_del(&inti->list); 734 fi->irq_count--; 735 deliver = 1; 736 break; 737 } 738 __set_intercept_indicator(vcpu, inti); 739 } 740 if (list_empty(&fi->list)) 741 atomic_set(&fi->active, 0); 742 spin_unlock(&fi->lock); 743 if (deliver) { 744 __do_deliver_interrupt(vcpu, inti); 745 kfree(inti); 746 } 747 } while (deliver); 748 } 749 } 750 751 void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu) 752 { 753 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 754 struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int; 755 struct kvm_s390_interrupt_info *n, *inti = NULL; 756 int deliver; 757 758 __reset_intercept_indicators(vcpu); 759 if (atomic_read(&li->active)) { 760 do { 761 deliver = 0; 762 spin_lock_bh(&li->lock); 763 list_for_each_entry_safe(inti, n, &li->list, list) { 764 if ((inti->type == KVM_S390_MCHK) && 765 __interrupt_is_deliverable(vcpu, inti)) { 766 list_del(&inti->list); 767 deliver = 1; 768 break; 769 } 770 __set_intercept_indicator(vcpu, inti); 771 } 772 if (list_empty(&li->list)) 773 atomic_set(&li->active, 0); 774 spin_unlock_bh(&li->lock); 775 if (deliver) { 776 __do_deliver_interrupt(vcpu, inti); 777 kfree(inti); 778 } 779 } while (deliver); 780 } 781 782 if (atomic_read(&fi->active)) { 783 do { 784 deliver = 0; 785 spin_lock(&fi->lock); 786 list_for_each_entry_safe(inti, n, &fi->list, list) { 787 if ((inti->type == KVM_S390_MCHK) && 788 __interrupt_is_deliverable(vcpu, inti)) { 789 list_del(&inti->list); 790 fi->irq_count--; 791 deliver = 1; 792 break; 793 } 794 __set_intercept_indicator(vcpu, inti); 795 } 796 if (list_empty(&fi->list)) 797 atomic_set(&fi->active, 0); 798 spin_unlock(&fi->lock); 799 if (deliver) { 800 __do_deliver_interrupt(vcpu, inti); 801 kfree(inti); 802 } 803 } while (deliver); 804 } 805 } 806 807 int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code) 808 { 809 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 810 struct kvm_s390_interrupt_info *inti; 811 812 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 813 if (!inti) 814 return -ENOMEM; 815 816 inti->type = KVM_S390_PROGRAM_INT; 817 inti->pgm.code = code; 818 819 VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code); 820 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1); 821 spin_lock_bh(&li->lock); 822 list_add(&inti->list, &li->list); 823 atomic_set(&li->active, 1); 824 BUG_ON(waitqueue_active(li->wq)); 825 spin_unlock_bh(&li->lock); 826 return 0; 827 } 828 829 int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu, 830 struct kvm_s390_pgm_info *pgm_info) 831 { 832 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 833 struct kvm_s390_interrupt_info *inti; 834 835 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 836 if (!inti) 837 return -ENOMEM; 838 839 VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)", 840 pgm_info->code); 841 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, 842 pgm_info->code, 0, 1); 843 844 inti->type = KVM_S390_PROGRAM_INT; 845 memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm)); 846 spin_lock_bh(&li->lock); 847 list_add(&inti->list, &li->list); 848 atomic_set(&li->active, 1); 849 BUG_ON(waitqueue_active(li->wq)); 850 spin_unlock_bh(&li->lock); 851 return 0; 852 } 853 854 struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, 855 u64 cr6, u64 schid) 856 { 857 struct kvm_s390_float_interrupt *fi; 858 struct kvm_s390_interrupt_info *inti, *iter; 859 860 if ((!schid && !cr6) || (schid && cr6)) 861 return NULL; 862 mutex_lock(&kvm->lock); 863 fi = &kvm->arch.float_int; 864 spin_lock(&fi->lock); 865 inti = NULL; 866 list_for_each_entry(iter, &fi->list, list) { 867 if (!is_ioint(iter->type)) 868 continue; 869 if (cr6 && 870 ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0)) 871 continue; 872 if (schid) { 873 if (((schid & 0x00000000ffff0000) >> 16) != 874 iter->io.subchannel_id) 875 continue; 876 if ((schid & 0x000000000000ffff) != 877 iter->io.subchannel_nr) 878 continue; 879 } 880 inti = iter; 881 break; 882 } 883 if (inti) { 884 list_del_init(&inti->list); 885 fi->irq_count--; 886 } 887 if (list_empty(&fi->list)) 888 atomic_set(&fi->active, 0); 889 spin_unlock(&fi->lock); 890 mutex_unlock(&kvm->lock); 891 return inti; 892 } 893 894 static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) 895 { 896 struct kvm_s390_local_interrupt *li; 897 struct kvm_s390_float_interrupt *fi; 898 struct kvm_s390_interrupt_info *iter; 899 struct kvm_vcpu *dst_vcpu = NULL; 900 int sigcpu; 901 int rc = 0; 902 903 mutex_lock(&kvm->lock); 904 fi = &kvm->arch.float_int; 905 spin_lock(&fi->lock); 906 if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) { 907 rc = -EINVAL; 908 goto unlock_fi; 909 } 910 fi->irq_count++; 911 if (!is_ioint(inti->type)) { 912 list_add_tail(&inti->list, &fi->list); 913 } else { 914 u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word); 915 916 /* Keep I/O interrupts sorted in isc order. */ 917 list_for_each_entry(iter, &fi->list, list) { 918 if (!is_ioint(iter->type)) 919 continue; 920 if (int_word_to_isc_bits(iter->io.io_int_word) 921 <= isc_bits) 922 continue; 923 break; 924 } 925 list_add_tail(&inti->list, &iter->list); 926 } 927 atomic_set(&fi->active, 1); 928 sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS); 929 if (sigcpu == KVM_MAX_VCPUS) { 930 do { 931 sigcpu = fi->next_rr_cpu++; 932 if (sigcpu == KVM_MAX_VCPUS) 933 sigcpu = fi->next_rr_cpu = 0; 934 } while (kvm_get_vcpu(kvm, sigcpu) == NULL); 935 } 936 dst_vcpu = kvm_get_vcpu(kvm, sigcpu); 937 li = &dst_vcpu->arch.local_int; 938 spin_lock_bh(&li->lock); 939 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 940 if (waitqueue_active(li->wq)) 941 wake_up_interruptible(li->wq); 942 kvm_get_vcpu(kvm, sigcpu)->preempted = true; 943 spin_unlock_bh(&li->lock); 944 unlock_fi: 945 spin_unlock(&fi->lock); 946 mutex_unlock(&kvm->lock); 947 return rc; 948 } 949 950 int kvm_s390_inject_vm(struct kvm *kvm, 951 struct kvm_s390_interrupt *s390int) 952 { 953 struct kvm_s390_interrupt_info *inti; 954 955 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 956 if (!inti) 957 return -ENOMEM; 958 959 inti->type = s390int->type; 960 switch (inti->type) { 961 case KVM_S390_INT_VIRTIO: 962 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx", 963 s390int->parm, s390int->parm64); 964 inti->ext.ext_params = s390int->parm; 965 inti->ext.ext_params2 = s390int->parm64; 966 break; 967 case KVM_S390_INT_SERVICE: 968 VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm); 969 inti->ext.ext_params = s390int->parm; 970 break; 971 case KVM_S390_INT_PFAULT_DONE: 972 inti->type = s390int->type; 973 inti->ext.ext_params2 = s390int->parm64; 974 break; 975 case KVM_S390_MCHK: 976 VM_EVENT(kvm, 5, "inject: machine check parm64:%llx", 977 s390int->parm64); 978 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */ 979 inti->mchk.mcic = s390int->parm64; 980 break; 981 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 982 if (inti->type & IOINT_AI_MASK) 983 VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)"); 984 else 985 VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x", 986 s390int->type & IOINT_CSSID_MASK, 987 s390int->type & IOINT_SSID_MASK, 988 s390int->type & IOINT_SCHID_MASK); 989 inti->io.subchannel_id = s390int->parm >> 16; 990 inti->io.subchannel_nr = s390int->parm & 0x0000ffffu; 991 inti->io.io_int_parm = s390int->parm64 >> 32; 992 inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull; 993 break; 994 default: 995 kfree(inti); 996 return -EINVAL; 997 } 998 trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64, 999 2); 1000 1001 return __inject_vm(kvm, inti); 1002 } 1003 1004 void kvm_s390_reinject_io_int(struct kvm *kvm, 1005 struct kvm_s390_interrupt_info *inti) 1006 { 1007 __inject_vm(kvm, inti); 1008 } 1009 1010 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, 1011 struct kvm_s390_interrupt *s390int) 1012 { 1013 struct kvm_s390_local_interrupt *li; 1014 struct kvm_s390_interrupt_info *inti; 1015 1016 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 1017 if (!inti) 1018 return -ENOMEM; 1019 1020 switch (s390int->type) { 1021 case KVM_S390_PROGRAM_INT: 1022 if (s390int->parm & 0xffff0000) { 1023 kfree(inti); 1024 return -EINVAL; 1025 } 1026 inti->type = s390int->type; 1027 inti->pgm.code = s390int->parm; 1028 VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)", 1029 s390int->parm); 1030 break; 1031 case KVM_S390_SIGP_SET_PREFIX: 1032 inti->prefix.address = s390int->parm; 1033 inti->type = s390int->type; 1034 VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)", 1035 s390int->parm); 1036 break; 1037 case KVM_S390_SIGP_STOP: 1038 case KVM_S390_RESTART: 1039 case KVM_S390_INT_CLOCK_COMP: 1040 case KVM_S390_INT_CPU_TIMER: 1041 VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type); 1042 inti->type = s390int->type; 1043 break; 1044 case KVM_S390_INT_EXTERNAL_CALL: 1045 if (s390int->parm & 0xffff0000) { 1046 kfree(inti); 1047 return -EINVAL; 1048 } 1049 VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u", 1050 s390int->parm); 1051 inti->type = s390int->type; 1052 inti->extcall.code = s390int->parm; 1053 break; 1054 case KVM_S390_INT_EMERGENCY: 1055 if (s390int->parm & 0xffff0000) { 1056 kfree(inti); 1057 return -EINVAL; 1058 } 1059 VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", s390int->parm); 1060 inti->type = s390int->type; 1061 inti->emerg.code = s390int->parm; 1062 break; 1063 case KVM_S390_MCHK: 1064 VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx", 1065 s390int->parm64); 1066 inti->type = s390int->type; 1067 inti->mchk.mcic = s390int->parm64; 1068 break; 1069 case KVM_S390_INT_PFAULT_INIT: 1070 inti->type = s390int->type; 1071 inti->ext.ext_params2 = s390int->parm64; 1072 break; 1073 case KVM_S390_INT_VIRTIO: 1074 case KVM_S390_INT_SERVICE: 1075 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 1076 default: 1077 kfree(inti); 1078 return -EINVAL; 1079 } 1080 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type, s390int->parm, 1081 s390int->parm64, 2); 1082 1083 mutex_lock(&vcpu->kvm->lock); 1084 li = &vcpu->arch.local_int; 1085 spin_lock_bh(&li->lock); 1086 if (inti->type == KVM_S390_PROGRAM_INT) 1087 list_add(&inti->list, &li->list); 1088 else 1089 list_add_tail(&inti->list, &li->list); 1090 atomic_set(&li->active, 1); 1091 if (inti->type == KVM_S390_SIGP_STOP) 1092 li->action_bits |= ACTION_STOP_ON_STOP; 1093 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 1094 if (waitqueue_active(&vcpu->wq)) 1095 wake_up_interruptible(&vcpu->wq); 1096 vcpu->preempted = true; 1097 spin_unlock_bh(&li->lock); 1098 mutex_unlock(&vcpu->kvm->lock); 1099 return 0; 1100 } 1101 1102 void kvm_s390_clear_float_irqs(struct kvm *kvm) 1103 { 1104 struct kvm_s390_float_interrupt *fi; 1105 struct kvm_s390_interrupt_info *n, *inti = NULL; 1106 1107 mutex_lock(&kvm->lock); 1108 fi = &kvm->arch.float_int; 1109 spin_lock(&fi->lock); 1110 list_for_each_entry_safe(inti, n, &fi->list, list) { 1111 list_del(&inti->list); 1112 kfree(inti); 1113 } 1114 fi->irq_count = 0; 1115 atomic_set(&fi->active, 0); 1116 spin_unlock(&fi->lock); 1117 mutex_unlock(&kvm->lock); 1118 } 1119 1120 static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti, 1121 u8 *addr) 1122 { 1123 struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr; 1124 struct kvm_s390_irq irq = {0}; 1125 1126 irq.type = inti->type; 1127 switch (inti->type) { 1128 case KVM_S390_INT_PFAULT_INIT: 1129 case KVM_S390_INT_PFAULT_DONE: 1130 case KVM_S390_INT_VIRTIO: 1131 case KVM_S390_INT_SERVICE: 1132 irq.u.ext = inti->ext; 1133 break; 1134 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 1135 irq.u.io = inti->io; 1136 break; 1137 case KVM_S390_MCHK: 1138 irq.u.mchk = inti->mchk; 1139 break; 1140 default: 1141 return -EINVAL; 1142 } 1143 1144 if (copy_to_user(uptr, &irq, sizeof(irq))) 1145 return -EFAULT; 1146 1147 return 0; 1148 } 1149 1150 static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len) 1151 { 1152 struct kvm_s390_interrupt_info *inti; 1153 struct kvm_s390_float_interrupt *fi; 1154 int ret = 0; 1155 int n = 0; 1156 1157 mutex_lock(&kvm->lock); 1158 fi = &kvm->arch.float_int; 1159 spin_lock(&fi->lock); 1160 1161 list_for_each_entry(inti, &fi->list, list) { 1162 if (len < sizeof(struct kvm_s390_irq)) { 1163 /* signal userspace to try again */ 1164 ret = -ENOMEM; 1165 break; 1166 } 1167 ret = copy_irq_to_user(inti, buf); 1168 if (ret) 1169 break; 1170 buf += sizeof(struct kvm_s390_irq); 1171 len -= sizeof(struct kvm_s390_irq); 1172 n++; 1173 } 1174 1175 spin_unlock(&fi->lock); 1176 mutex_unlock(&kvm->lock); 1177 1178 return ret < 0 ? ret : n; 1179 } 1180 1181 static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) 1182 { 1183 int r; 1184 1185 switch (attr->group) { 1186 case KVM_DEV_FLIC_GET_ALL_IRQS: 1187 r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr, 1188 attr->attr); 1189 break; 1190 default: 1191 r = -EINVAL; 1192 } 1193 1194 return r; 1195 } 1196 1197 static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti, 1198 u64 addr) 1199 { 1200 struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr; 1201 void *target = NULL; 1202 void __user *source; 1203 u64 size; 1204 1205 if (get_user(inti->type, (u64 __user *)addr)) 1206 return -EFAULT; 1207 1208 switch (inti->type) { 1209 case KVM_S390_INT_PFAULT_INIT: 1210 case KVM_S390_INT_PFAULT_DONE: 1211 case KVM_S390_INT_VIRTIO: 1212 case KVM_S390_INT_SERVICE: 1213 target = (void *) &inti->ext; 1214 source = &uptr->u.ext; 1215 size = sizeof(inti->ext); 1216 break; 1217 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 1218 target = (void *) &inti->io; 1219 source = &uptr->u.io; 1220 size = sizeof(inti->io); 1221 break; 1222 case KVM_S390_MCHK: 1223 target = (void *) &inti->mchk; 1224 source = &uptr->u.mchk; 1225 size = sizeof(inti->mchk); 1226 break; 1227 default: 1228 return -EINVAL; 1229 } 1230 1231 if (copy_from_user(target, source, size)) 1232 return -EFAULT; 1233 1234 return 0; 1235 } 1236 1237 static int enqueue_floating_irq(struct kvm_device *dev, 1238 struct kvm_device_attr *attr) 1239 { 1240 struct kvm_s390_interrupt_info *inti = NULL; 1241 int r = 0; 1242 int len = attr->attr; 1243 1244 if (len % sizeof(struct kvm_s390_irq) != 0) 1245 return -EINVAL; 1246 else if (len > KVM_S390_FLIC_MAX_BUFFER) 1247 return -EINVAL; 1248 1249 while (len >= sizeof(struct kvm_s390_irq)) { 1250 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 1251 if (!inti) 1252 return -ENOMEM; 1253 1254 r = copy_irq_from_user(inti, attr->addr); 1255 if (r) { 1256 kfree(inti); 1257 return r; 1258 } 1259 r = __inject_vm(dev->kvm, inti); 1260 if (r) { 1261 kfree(inti); 1262 return r; 1263 } 1264 len -= sizeof(struct kvm_s390_irq); 1265 attr->addr += sizeof(struct kvm_s390_irq); 1266 } 1267 1268 return r; 1269 } 1270 1271 static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id) 1272 { 1273 if (id >= MAX_S390_IO_ADAPTERS) 1274 return NULL; 1275 return kvm->arch.adapters[id]; 1276 } 1277 1278 static int register_io_adapter(struct kvm_device *dev, 1279 struct kvm_device_attr *attr) 1280 { 1281 struct s390_io_adapter *adapter; 1282 struct kvm_s390_io_adapter adapter_info; 1283 1284 if (copy_from_user(&adapter_info, 1285 (void __user *)attr->addr, sizeof(adapter_info))) 1286 return -EFAULT; 1287 1288 if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) || 1289 (dev->kvm->arch.adapters[adapter_info.id] != NULL)) 1290 return -EINVAL; 1291 1292 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); 1293 if (!adapter) 1294 return -ENOMEM; 1295 1296 INIT_LIST_HEAD(&adapter->maps); 1297 init_rwsem(&adapter->maps_lock); 1298 atomic_set(&adapter->nr_maps, 0); 1299 adapter->id = adapter_info.id; 1300 adapter->isc = adapter_info.isc; 1301 adapter->maskable = adapter_info.maskable; 1302 adapter->masked = false; 1303 adapter->swap = adapter_info.swap; 1304 dev->kvm->arch.adapters[adapter->id] = adapter; 1305 1306 return 0; 1307 } 1308 1309 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked) 1310 { 1311 int ret; 1312 struct s390_io_adapter *adapter = get_io_adapter(kvm, id); 1313 1314 if (!adapter || !adapter->maskable) 1315 return -EINVAL; 1316 ret = adapter->masked; 1317 adapter->masked = masked; 1318 return ret; 1319 } 1320 1321 static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr) 1322 { 1323 struct s390_io_adapter *adapter = get_io_adapter(kvm, id); 1324 struct s390_map_info *map; 1325 int ret; 1326 1327 if (!adapter || !addr) 1328 return -EINVAL; 1329 1330 map = kzalloc(sizeof(*map), GFP_KERNEL); 1331 if (!map) { 1332 ret = -ENOMEM; 1333 goto out; 1334 } 1335 INIT_LIST_HEAD(&map->list); 1336 map->guest_addr = addr; 1337 map->addr = gmap_translate(addr, kvm->arch.gmap); 1338 if (map->addr == -EFAULT) { 1339 ret = -EFAULT; 1340 goto out; 1341 } 1342 ret = get_user_pages_fast(map->addr, 1, 1, &map->page); 1343 if (ret < 0) 1344 goto out; 1345 BUG_ON(ret != 1); 1346 down_write(&adapter->maps_lock); 1347 if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) { 1348 list_add_tail(&map->list, &adapter->maps); 1349 ret = 0; 1350 } else { 1351 put_page(map->page); 1352 ret = -EINVAL; 1353 } 1354 up_write(&adapter->maps_lock); 1355 out: 1356 if (ret) 1357 kfree(map); 1358 return ret; 1359 } 1360 1361 static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr) 1362 { 1363 struct s390_io_adapter *adapter = get_io_adapter(kvm, id); 1364 struct s390_map_info *map, *tmp; 1365 int found = 0; 1366 1367 if (!adapter || !addr) 1368 return -EINVAL; 1369 1370 down_write(&adapter->maps_lock); 1371 list_for_each_entry_safe(map, tmp, &adapter->maps, list) { 1372 if (map->guest_addr == addr) { 1373 found = 1; 1374 atomic_dec(&adapter->nr_maps); 1375 list_del(&map->list); 1376 put_page(map->page); 1377 kfree(map); 1378 break; 1379 } 1380 } 1381 up_write(&adapter->maps_lock); 1382 1383 return found ? 0 : -EINVAL; 1384 } 1385 1386 void kvm_s390_destroy_adapters(struct kvm *kvm) 1387 { 1388 int i; 1389 struct s390_map_info *map, *tmp; 1390 1391 for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) { 1392 if (!kvm->arch.adapters[i]) 1393 continue; 1394 list_for_each_entry_safe(map, tmp, 1395 &kvm->arch.adapters[i]->maps, list) { 1396 list_del(&map->list); 1397 put_page(map->page); 1398 kfree(map); 1399 } 1400 kfree(kvm->arch.adapters[i]); 1401 } 1402 } 1403 1404 static int modify_io_adapter(struct kvm_device *dev, 1405 struct kvm_device_attr *attr) 1406 { 1407 struct kvm_s390_io_adapter_req req; 1408 struct s390_io_adapter *adapter; 1409 int ret; 1410 1411 if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req))) 1412 return -EFAULT; 1413 1414 adapter = get_io_adapter(dev->kvm, req.id); 1415 if (!adapter) 1416 return -EINVAL; 1417 switch (req.type) { 1418 case KVM_S390_IO_ADAPTER_MASK: 1419 ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask); 1420 if (ret > 0) 1421 ret = 0; 1422 break; 1423 case KVM_S390_IO_ADAPTER_MAP: 1424 ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr); 1425 break; 1426 case KVM_S390_IO_ADAPTER_UNMAP: 1427 ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr); 1428 break; 1429 default: 1430 ret = -EINVAL; 1431 } 1432 1433 return ret; 1434 } 1435 1436 static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) 1437 { 1438 int r = 0; 1439 unsigned int i; 1440 struct kvm_vcpu *vcpu; 1441 1442 switch (attr->group) { 1443 case KVM_DEV_FLIC_ENQUEUE: 1444 r = enqueue_floating_irq(dev, attr); 1445 break; 1446 case KVM_DEV_FLIC_CLEAR_IRQS: 1447 r = 0; 1448 kvm_s390_clear_float_irqs(dev->kvm); 1449 break; 1450 case KVM_DEV_FLIC_APF_ENABLE: 1451 dev->kvm->arch.gmap->pfault_enabled = 1; 1452 break; 1453 case KVM_DEV_FLIC_APF_DISABLE_WAIT: 1454 dev->kvm->arch.gmap->pfault_enabled = 0; 1455 /* 1456 * Make sure no async faults are in transition when 1457 * clearing the queues. So we don't need to worry 1458 * about late coming workers. 1459 */ 1460 synchronize_srcu(&dev->kvm->srcu); 1461 kvm_for_each_vcpu(i, vcpu, dev->kvm) 1462 kvm_clear_async_pf_completion_queue(vcpu); 1463 break; 1464 case KVM_DEV_FLIC_ADAPTER_REGISTER: 1465 r = register_io_adapter(dev, attr); 1466 break; 1467 case KVM_DEV_FLIC_ADAPTER_MODIFY: 1468 r = modify_io_adapter(dev, attr); 1469 break; 1470 default: 1471 r = -EINVAL; 1472 } 1473 1474 return r; 1475 } 1476 1477 static int flic_create(struct kvm_device *dev, u32 type) 1478 { 1479 if (!dev) 1480 return -EINVAL; 1481 if (dev->kvm->arch.flic) 1482 return -EINVAL; 1483 dev->kvm->arch.flic = dev; 1484 return 0; 1485 } 1486 1487 static void flic_destroy(struct kvm_device *dev) 1488 { 1489 dev->kvm->arch.flic = NULL; 1490 kfree(dev); 1491 } 1492 1493 /* s390 floating irq controller (flic) */ 1494 struct kvm_device_ops kvm_flic_ops = { 1495 .name = "kvm-flic", 1496 .get_attr = flic_get_attr, 1497 .set_attr = flic_set_attr, 1498 .create = flic_create, 1499 .destroy = flic_destroy, 1500 }; 1501 1502 static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap) 1503 { 1504 unsigned long bit; 1505 1506 bit = bit_nr + (addr % PAGE_SIZE) * 8; 1507 1508 return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit; 1509 } 1510 1511 static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter, 1512 u64 addr) 1513 { 1514 struct s390_map_info *map; 1515 1516 if (!adapter) 1517 return NULL; 1518 1519 list_for_each_entry(map, &adapter->maps, list) { 1520 if (map->guest_addr == addr) 1521 return map; 1522 } 1523 return NULL; 1524 } 1525 1526 static int adapter_indicators_set(struct kvm *kvm, 1527 struct s390_io_adapter *adapter, 1528 struct kvm_s390_adapter_int *adapter_int) 1529 { 1530 unsigned long bit; 1531 int summary_set, idx; 1532 struct s390_map_info *info; 1533 void *map; 1534 1535 info = get_map_info(adapter, adapter_int->ind_addr); 1536 if (!info) 1537 return -1; 1538 map = page_address(info->page); 1539 bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap); 1540 set_bit(bit, map); 1541 idx = srcu_read_lock(&kvm->srcu); 1542 mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT); 1543 set_page_dirty_lock(info->page); 1544 info = get_map_info(adapter, adapter_int->summary_addr); 1545 if (!info) { 1546 srcu_read_unlock(&kvm->srcu, idx); 1547 return -1; 1548 } 1549 map = page_address(info->page); 1550 bit = get_ind_bit(info->addr, adapter_int->summary_offset, 1551 adapter->swap); 1552 summary_set = test_and_set_bit(bit, map); 1553 mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT); 1554 set_page_dirty_lock(info->page); 1555 srcu_read_unlock(&kvm->srcu, idx); 1556 return summary_set ? 0 : 1; 1557 } 1558 1559 /* 1560 * < 0 - not injected due to error 1561 * = 0 - coalesced, summary indicator already active 1562 * > 0 - injected interrupt 1563 */ 1564 static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e, 1565 struct kvm *kvm, int irq_source_id, int level, 1566 bool line_status) 1567 { 1568 int ret; 1569 struct s390_io_adapter *adapter; 1570 1571 /* We're only interested in the 0->1 transition. */ 1572 if (!level) 1573 return 0; 1574 adapter = get_io_adapter(kvm, e->adapter.adapter_id); 1575 if (!adapter) 1576 return -1; 1577 down_read(&adapter->maps_lock); 1578 ret = adapter_indicators_set(kvm, adapter, &e->adapter); 1579 up_read(&adapter->maps_lock); 1580 if ((ret > 0) && !adapter->masked) { 1581 struct kvm_s390_interrupt s390int = { 1582 .type = KVM_S390_INT_IO(1, 0, 0, 0), 1583 .parm = 0, 1584 .parm64 = (adapter->isc << 27) | 0x80000000, 1585 }; 1586 ret = kvm_s390_inject_vm(kvm, &s390int); 1587 if (ret == 0) 1588 ret = 1; 1589 } 1590 return ret; 1591 } 1592 1593 int kvm_set_routing_entry(struct kvm_irq_routing_table *rt, 1594 struct kvm_kernel_irq_routing_entry *e, 1595 const struct kvm_irq_routing_entry *ue) 1596 { 1597 int ret; 1598 1599 switch (ue->type) { 1600 case KVM_IRQ_ROUTING_S390_ADAPTER: 1601 e->set = set_adapter_int; 1602 e->adapter.summary_addr = ue->u.adapter.summary_addr; 1603 e->adapter.ind_addr = ue->u.adapter.ind_addr; 1604 e->adapter.summary_offset = ue->u.adapter.summary_offset; 1605 e->adapter.ind_offset = ue->u.adapter.ind_offset; 1606 e->adapter.adapter_id = ue->u.adapter.adapter_id; 1607 ret = 0; 1608 break; 1609 default: 1610 ret = -EINVAL; 1611 } 1612 1613 return ret; 1614 } 1615 1616 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, 1617 int irq_source_id, int level, bool line_status) 1618 { 1619 return -EINVAL; 1620 } 1621