1 /* 2 * handling kvm guest interrupts 3 * 4 * Copyright IBM Corp. 2008,2014 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License (version 2 only) 8 * as published by the Free Software Foundation. 9 * 10 * Author(s): Carsten Otte <cotte@de.ibm.com> 11 */ 12 13 #include <linux/interrupt.h> 14 #include <linux/kvm_host.h> 15 #include <linux/hrtimer.h> 16 #include <linux/mmu_context.h> 17 #include <linux/signal.h> 18 #include <linux/slab.h> 19 #include <asm/asm-offsets.h> 20 #include <asm/uaccess.h> 21 #include "kvm-s390.h" 22 #include "gaccess.h" 23 #include "trace-s390.h" 24 25 #define IOINT_SCHID_MASK 0x0000ffff 26 #define IOINT_SSID_MASK 0x00030000 27 #define IOINT_CSSID_MASK 0x03fc0000 28 #define IOINT_AI_MASK 0x04000000 29 30 static void deliver_ckc_interrupt(struct kvm_vcpu *vcpu); 31 32 static int is_ioint(u64 type) 33 { 34 return ((type & 0xfffe0000u) != 0xfffe0000u); 35 } 36 37 int psw_extint_disabled(struct kvm_vcpu *vcpu) 38 { 39 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT); 40 } 41 42 static int psw_ioint_disabled(struct kvm_vcpu *vcpu) 43 { 44 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO); 45 } 46 47 static int psw_mchk_disabled(struct kvm_vcpu *vcpu) 48 { 49 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK); 50 } 51 52 static int psw_interrupts_disabled(struct kvm_vcpu *vcpu) 53 { 54 if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) || 55 (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) || 56 (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT)) 57 return 0; 58 return 1; 59 } 60 61 static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu) 62 { 63 if (psw_extint_disabled(vcpu) || 64 !(vcpu->arch.sie_block->gcr[0] & 0x800ul)) 65 return 0; 66 if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu)) 67 /* No timer interrupts when single stepping */ 68 return 0; 69 return 1; 70 } 71 72 static u64 int_word_to_isc_bits(u32 int_word) 73 { 74 u8 isc = (int_word & 0x38000000) >> 27; 75 76 return (0x80 >> isc) << 24; 77 } 78 79 static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu, 80 struct kvm_s390_interrupt_info *inti) 81 { 82 switch (inti->type) { 83 case KVM_S390_INT_EXTERNAL_CALL: 84 if (psw_extint_disabled(vcpu)) 85 return 0; 86 if (vcpu->arch.sie_block->gcr[0] & 0x2000ul) 87 return 1; 88 case KVM_S390_INT_EMERGENCY: 89 if (psw_extint_disabled(vcpu)) 90 return 0; 91 if (vcpu->arch.sie_block->gcr[0] & 0x4000ul) 92 return 1; 93 return 0; 94 case KVM_S390_INT_CLOCK_COMP: 95 return ckc_interrupts_enabled(vcpu); 96 case KVM_S390_INT_CPU_TIMER: 97 if (psw_extint_disabled(vcpu)) 98 return 0; 99 if (vcpu->arch.sie_block->gcr[0] & 0x400ul) 100 return 1; 101 return 0; 102 case KVM_S390_INT_SERVICE: 103 case KVM_S390_INT_PFAULT_INIT: 104 case KVM_S390_INT_PFAULT_DONE: 105 case KVM_S390_INT_VIRTIO: 106 if (psw_extint_disabled(vcpu)) 107 return 0; 108 if (vcpu->arch.sie_block->gcr[0] & 0x200ul) 109 return 1; 110 return 0; 111 case KVM_S390_PROGRAM_INT: 112 case KVM_S390_SIGP_STOP: 113 case KVM_S390_SIGP_SET_PREFIX: 114 case KVM_S390_RESTART: 115 return 1; 116 case KVM_S390_MCHK: 117 if (psw_mchk_disabled(vcpu)) 118 return 0; 119 if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14) 120 return 1; 121 return 0; 122 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 123 if (psw_ioint_disabled(vcpu)) 124 return 0; 125 if (vcpu->arch.sie_block->gcr[6] & 126 int_word_to_isc_bits(inti->io.io_int_word)) 127 return 1; 128 return 0; 129 default: 130 printk(KERN_WARNING "illegal interrupt type %llx\n", 131 inti->type); 132 BUG(); 133 } 134 return 0; 135 } 136 137 static void __set_cpu_idle(struct kvm_vcpu *vcpu) 138 { 139 atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); 140 set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); 141 } 142 143 static void __unset_cpu_idle(struct kvm_vcpu *vcpu) 144 { 145 atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); 146 clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); 147 } 148 149 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) 150 { 151 atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT, 152 &vcpu->arch.sie_block->cpuflags); 153 vcpu->arch.sie_block->lctl = 0x0000; 154 vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT); 155 156 if (guestdbg_enabled(vcpu)) { 157 vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 | 158 LCTL_CR10 | LCTL_CR11); 159 vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT); 160 } 161 } 162 163 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) 164 { 165 atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags); 166 } 167 168 static void __set_intercept_indicator(struct kvm_vcpu *vcpu, 169 struct kvm_s390_interrupt_info *inti) 170 { 171 switch (inti->type) { 172 case KVM_S390_INT_EXTERNAL_CALL: 173 case KVM_S390_INT_EMERGENCY: 174 case KVM_S390_INT_SERVICE: 175 case KVM_S390_INT_PFAULT_INIT: 176 case KVM_S390_INT_PFAULT_DONE: 177 case KVM_S390_INT_VIRTIO: 178 case KVM_S390_INT_CLOCK_COMP: 179 case KVM_S390_INT_CPU_TIMER: 180 if (psw_extint_disabled(vcpu)) 181 __set_cpuflag(vcpu, CPUSTAT_EXT_INT); 182 else 183 vcpu->arch.sie_block->lctl |= LCTL_CR0; 184 break; 185 case KVM_S390_SIGP_STOP: 186 __set_cpuflag(vcpu, CPUSTAT_STOP_INT); 187 break; 188 case KVM_S390_MCHK: 189 if (psw_mchk_disabled(vcpu)) 190 vcpu->arch.sie_block->ictl |= ICTL_LPSW; 191 else 192 vcpu->arch.sie_block->lctl |= LCTL_CR14; 193 break; 194 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 195 if (psw_ioint_disabled(vcpu)) 196 __set_cpuflag(vcpu, CPUSTAT_IO_INT); 197 else 198 vcpu->arch.sie_block->lctl |= LCTL_CR6; 199 break; 200 default: 201 BUG(); 202 } 203 } 204 205 static int __deliver_prog_irq(struct kvm_vcpu *vcpu, 206 struct kvm_s390_pgm_info *pgm_info) 207 { 208 const unsigned short table[] = { 2, 4, 4, 6 }; 209 int rc = 0; 210 211 switch (pgm_info->code & ~PGM_PER) { 212 case PGM_AFX_TRANSLATION: 213 case PGM_ASX_TRANSLATION: 214 case PGM_EX_TRANSLATION: 215 case PGM_LFX_TRANSLATION: 216 case PGM_LSTE_SEQUENCE: 217 case PGM_LSX_TRANSLATION: 218 case PGM_LX_TRANSLATION: 219 case PGM_PRIMARY_AUTHORITY: 220 case PGM_SECONDARY_AUTHORITY: 221 case PGM_SPACE_SWITCH: 222 rc = put_guest_lc(vcpu, pgm_info->trans_exc_code, 223 (u64 *)__LC_TRANS_EXC_CODE); 224 break; 225 case PGM_ALEN_TRANSLATION: 226 case PGM_ALE_SEQUENCE: 227 case PGM_ASTE_INSTANCE: 228 case PGM_ASTE_SEQUENCE: 229 case PGM_ASTE_VALIDITY: 230 case PGM_EXTENDED_AUTHORITY: 231 rc = put_guest_lc(vcpu, pgm_info->exc_access_id, 232 (u8 *)__LC_EXC_ACCESS_ID); 233 break; 234 case PGM_ASCE_TYPE: 235 case PGM_PAGE_TRANSLATION: 236 case PGM_REGION_FIRST_TRANS: 237 case PGM_REGION_SECOND_TRANS: 238 case PGM_REGION_THIRD_TRANS: 239 case PGM_SEGMENT_TRANSLATION: 240 rc = put_guest_lc(vcpu, pgm_info->trans_exc_code, 241 (u64 *)__LC_TRANS_EXC_CODE); 242 rc |= put_guest_lc(vcpu, pgm_info->exc_access_id, 243 (u8 *)__LC_EXC_ACCESS_ID); 244 rc |= put_guest_lc(vcpu, pgm_info->op_access_id, 245 (u8 *)__LC_OP_ACCESS_ID); 246 break; 247 case PGM_MONITOR: 248 rc = put_guest_lc(vcpu, pgm_info->mon_class_nr, 249 (u64 *)__LC_MON_CLASS_NR); 250 rc |= put_guest_lc(vcpu, pgm_info->mon_code, 251 (u64 *)__LC_MON_CODE); 252 break; 253 case PGM_DATA: 254 rc = put_guest_lc(vcpu, pgm_info->data_exc_code, 255 (u32 *)__LC_DATA_EXC_CODE); 256 break; 257 case PGM_PROTECTION: 258 rc = put_guest_lc(vcpu, pgm_info->trans_exc_code, 259 (u64 *)__LC_TRANS_EXC_CODE); 260 rc |= put_guest_lc(vcpu, pgm_info->exc_access_id, 261 (u8 *)__LC_EXC_ACCESS_ID); 262 break; 263 } 264 265 if (pgm_info->code & PGM_PER) { 266 rc |= put_guest_lc(vcpu, pgm_info->per_code, 267 (u8 *) __LC_PER_CODE); 268 rc |= put_guest_lc(vcpu, pgm_info->per_atmid, 269 (u8 *)__LC_PER_ATMID); 270 rc |= put_guest_lc(vcpu, pgm_info->per_address, 271 (u64 *) __LC_PER_ADDRESS); 272 rc |= put_guest_lc(vcpu, pgm_info->per_access_id, 273 (u8 *) __LC_PER_ACCESS_ID); 274 } 275 276 switch (vcpu->arch.sie_block->icptcode) { 277 case ICPT_INST: 278 case ICPT_INSTPROGI: 279 case ICPT_OPEREXC: 280 case ICPT_PARTEXEC: 281 case ICPT_IOINST: 282 /* last instruction only stored for these icptcodes */ 283 rc |= put_guest_lc(vcpu, table[vcpu->arch.sie_block->ipa >> 14], 284 (u16 *) __LC_PGM_ILC); 285 break; 286 case ICPT_PROGI: 287 rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->pgmilc, 288 (u16 *) __LC_PGM_ILC); 289 break; 290 default: 291 rc |= put_guest_lc(vcpu, 0, 292 (u16 *) __LC_PGM_ILC); 293 } 294 295 rc |= put_guest_lc(vcpu, pgm_info->code, 296 (u16 *)__LC_PGM_INT_CODE); 297 rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW, 298 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 299 rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW, 300 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 301 302 return rc; 303 } 304 305 static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, 306 struct kvm_s390_interrupt_info *inti) 307 { 308 const unsigned short table[] = { 2, 4, 4, 6 }; 309 int rc = 0; 310 311 switch (inti->type) { 312 case KVM_S390_INT_EMERGENCY: 313 VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg"); 314 vcpu->stat.deliver_emergency_signal++; 315 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 316 inti->emerg.code, 0); 317 rc = put_guest_lc(vcpu, 0x1201, (u16 *)__LC_EXT_INT_CODE); 318 rc |= put_guest_lc(vcpu, inti->emerg.code, 319 (u16 *)__LC_EXT_CPU_ADDR); 320 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 321 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 322 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 323 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 324 break; 325 case KVM_S390_INT_EXTERNAL_CALL: 326 VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call"); 327 vcpu->stat.deliver_external_call++; 328 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 329 inti->extcall.code, 0); 330 rc = put_guest_lc(vcpu, 0x1202, (u16 *)__LC_EXT_INT_CODE); 331 rc |= put_guest_lc(vcpu, inti->extcall.code, 332 (u16 *)__LC_EXT_CPU_ADDR); 333 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 334 &vcpu->arch.sie_block->gpsw, 335 sizeof(psw_t)); 336 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 337 &vcpu->arch.sie_block->gpsw, 338 sizeof(psw_t)); 339 break; 340 case KVM_S390_INT_CLOCK_COMP: 341 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 342 inti->ext.ext_params, 0); 343 deliver_ckc_interrupt(vcpu); 344 break; 345 case KVM_S390_INT_CPU_TIMER: 346 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 347 inti->ext.ext_params, 0); 348 rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER, 349 (u16 *)__LC_EXT_INT_CODE); 350 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 351 &vcpu->arch.sie_block->gpsw, 352 sizeof(psw_t)); 353 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 354 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 355 rc |= put_guest_lc(vcpu, inti->ext.ext_params, 356 (u32 *)__LC_EXT_PARAMS); 357 break; 358 case KVM_S390_INT_SERVICE: 359 VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x", 360 inti->ext.ext_params); 361 vcpu->stat.deliver_service_signal++; 362 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 363 inti->ext.ext_params, 0); 364 rc = put_guest_lc(vcpu, 0x2401, (u16 *)__LC_EXT_INT_CODE); 365 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 366 &vcpu->arch.sie_block->gpsw, 367 sizeof(psw_t)); 368 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 369 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 370 rc |= put_guest_lc(vcpu, inti->ext.ext_params, 371 (u32 *)__LC_EXT_PARAMS); 372 break; 373 case KVM_S390_INT_PFAULT_INIT: 374 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0, 375 inti->ext.ext_params2); 376 rc = put_guest_lc(vcpu, 0x2603, (u16 *) __LC_EXT_INT_CODE); 377 rc |= put_guest_lc(vcpu, 0x0600, (u16 *) __LC_EXT_CPU_ADDR); 378 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 379 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 380 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 381 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 382 rc |= put_guest_lc(vcpu, inti->ext.ext_params2, 383 (u64 *) __LC_EXT_PARAMS2); 384 break; 385 case KVM_S390_INT_PFAULT_DONE: 386 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0, 387 inti->ext.ext_params2); 388 rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE); 389 rc |= put_guest_lc(vcpu, 0x0680, (u16 *)__LC_EXT_CPU_ADDR); 390 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 391 &vcpu->arch.sie_block->gpsw, 392 sizeof(psw_t)); 393 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 394 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 395 rc |= put_guest_lc(vcpu, inti->ext.ext_params2, 396 (u64 *)__LC_EXT_PARAMS2); 397 break; 398 case KVM_S390_INT_VIRTIO: 399 VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx", 400 inti->ext.ext_params, inti->ext.ext_params2); 401 vcpu->stat.deliver_virtio_interrupt++; 402 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 403 inti->ext.ext_params, 404 inti->ext.ext_params2); 405 rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE); 406 rc |= put_guest_lc(vcpu, 0x0d00, (u16 *)__LC_EXT_CPU_ADDR); 407 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 408 &vcpu->arch.sie_block->gpsw, 409 sizeof(psw_t)); 410 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 411 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 412 rc |= put_guest_lc(vcpu, inti->ext.ext_params, 413 (u32 *)__LC_EXT_PARAMS); 414 rc |= put_guest_lc(vcpu, inti->ext.ext_params2, 415 (u64 *)__LC_EXT_PARAMS2); 416 break; 417 case KVM_S390_SIGP_STOP: 418 VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop"); 419 vcpu->stat.deliver_stop_signal++; 420 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 421 0, 0); 422 __set_intercept_indicator(vcpu, inti); 423 break; 424 425 case KVM_S390_SIGP_SET_PREFIX: 426 VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", 427 inti->prefix.address); 428 vcpu->stat.deliver_prefix_signal++; 429 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 430 inti->prefix.address, 0); 431 kvm_s390_set_prefix(vcpu, inti->prefix.address); 432 break; 433 434 case KVM_S390_RESTART: 435 VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart"); 436 vcpu->stat.deliver_restart_signal++; 437 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 438 0, 0); 439 rc = write_guest_lc(vcpu, 440 offsetof(struct _lowcore, restart_old_psw), 441 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 442 rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw), 443 &vcpu->arch.sie_block->gpsw, 444 sizeof(psw_t)); 445 break; 446 case KVM_S390_PROGRAM_INT: 447 VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x", 448 inti->pgm.code, 449 table[vcpu->arch.sie_block->ipa >> 14]); 450 vcpu->stat.deliver_program_int++; 451 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 452 inti->pgm.code, 0); 453 rc = __deliver_prog_irq(vcpu, &inti->pgm); 454 break; 455 456 case KVM_S390_MCHK: 457 VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx", 458 inti->mchk.mcic); 459 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 460 inti->mchk.cr14, 461 inti->mchk.mcic); 462 rc = kvm_s390_vcpu_store_status(vcpu, 463 KVM_S390_STORE_STATUS_PREFIXED); 464 rc |= put_guest_lc(vcpu, inti->mchk.mcic, (u64 *)__LC_MCCK_CODE); 465 rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, 466 &vcpu->arch.sie_block->gpsw, 467 sizeof(psw_t)); 468 rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, 469 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 470 break; 471 472 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 473 { 474 __u32 param0 = ((__u32)inti->io.subchannel_id << 16) | 475 inti->io.subchannel_nr; 476 __u64 param1 = ((__u64)inti->io.io_int_parm << 32) | 477 inti->io.io_int_word; 478 VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type); 479 vcpu->stat.deliver_io_int++; 480 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 481 param0, param1); 482 rc = put_guest_lc(vcpu, inti->io.subchannel_id, 483 (u16 *)__LC_SUBCHANNEL_ID); 484 rc |= put_guest_lc(vcpu, inti->io.subchannel_nr, 485 (u16 *)__LC_SUBCHANNEL_NR); 486 rc |= put_guest_lc(vcpu, inti->io.io_int_parm, 487 (u32 *)__LC_IO_INT_PARM); 488 rc |= put_guest_lc(vcpu, inti->io.io_int_word, 489 (u32 *)__LC_IO_INT_WORD); 490 rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW, 491 &vcpu->arch.sie_block->gpsw, 492 sizeof(psw_t)); 493 rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW, 494 &vcpu->arch.sie_block->gpsw, 495 sizeof(psw_t)); 496 break; 497 } 498 default: 499 BUG(); 500 } 501 if (rc) { 502 printk("kvm: The guest lowcore is not mapped during interrupt " 503 "delivery, killing userspace\n"); 504 do_exit(SIGKILL); 505 } 506 } 507 508 static void deliver_ckc_interrupt(struct kvm_vcpu *vcpu) 509 { 510 int rc; 511 512 rc = put_guest_lc(vcpu, 0x1004, (u16 __user *)__LC_EXT_INT_CODE); 513 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, 514 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 515 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, 516 &vcpu->arch.sie_block->gpsw, 517 sizeof(psw_t)); 518 if (rc) { 519 printk("kvm: The guest lowcore is not mapped during interrupt " 520 "delivery, killing userspace\n"); 521 do_exit(SIGKILL); 522 } 523 } 524 525 /* Check whether SIGP interpretation facility has an external call pending */ 526 int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu) 527 { 528 atomic_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl; 529 530 if (!psw_extint_disabled(vcpu) && 531 (vcpu->arch.sie_block->gcr[0] & 0x2000ul) && 532 (atomic_read(sigp_ctrl) & SIGP_CTRL_C) && 533 (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND)) 534 return 1; 535 536 return 0; 537 } 538 539 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) 540 { 541 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 542 struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int; 543 struct kvm_s390_interrupt_info *inti; 544 int rc = 0; 545 546 if (atomic_read(&li->active)) { 547 spin_lock_bh(&li->lock); 548 list_for_each_entry(inti, &li->list, list) 549 if (__interrupt_is_deliverable(vcpu, inti)) { 550 rc = 1; 551 break; 552 } 553 spin_unlock_bh(&li->lock); 554 } 555 556 if ((!rc) && atomic_read(&fi->active)) { 557 spin_lock(&fi->lock); 558 list_for_each_entry(inti, &fi->list, list) 559 if (__interrupt_is_deliverable(vcpu, inti)) { 560 rc = 1; 561 break; 562 } 563 spin_unlock(&fi->lock); 564 } 565 566 if (!rc && kvm_cpu_has_pending_timer(vcpu)) 567 rc = 1; 568 569 if (!rc && kvm_s390_si_ext_call_pending(vcpu)) 570 rc = 1; 571 572 return rc; 573 } 574 575 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) 576 { 577 if (!(vcpu->arch.sie_block->ckc < 578 get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) 579 return 0; 580 if (!ckc_interrupts_enabled(vcpu)) 581 return 0; 582 return 1; 583 } 584 585 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) 586 { 587 u64 now, sltime; 588 589 vcpu->stat.exit_wait_state++; 590 591 /* fast path */ 592 if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu)) 593 return 0; 594 595 if (psw_interrupts_disabled(vcpu)) { 596 VCPU_EVENT(vcpu, 3, "%s", "disabled wait"); 597 return -EOPNOTSUPP; /* disabled wait */ 598 } 599 600 __set_cpu_idle(vcpu); 601 if (!ckc_interrupts_enabled(vcpu)) { 602 VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer"); 603 goto no_timer; 604 } 605 606 now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch; 607 sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); 608 hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL); 609 VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime); 610 no_timer: 611 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 612 kvm_vcpu_block(vcpu); 613 __unset_cpu_idle(vcpu); 614 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 615 616 hrtimer_try_to_cancel(&vcpu->arch.ckc_timer); 617 return 0; 618 } 619 620 void kvm_s390_tasklet(unsigned long parm) 621 { 622 struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm; 623 624 if (waitqueue_active(&vcpu->wq)) 625 wake_up_interruptible(&vcpu->wq); 626 } 627 628 /* 629 * low level hrtimer wake routine. Because this runs in hardirq context 630 * we schedule a tasklet to do the real work. 631 */ 632 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer) 633 { 634 struct kvm_vcpu *vcpu; 635 636 vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer); 637 vcpu->preempted = true; 638 tasklet_schedule(&vcpu->arch.tasklet); 639 640 return HRTIMER_NORESTART; 641 } 642 643 void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu) 644 { 645 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 646 struct kvm_s390_interrupt_info *n, *inti = NULL; 647 648 spin_lock_bh(&li->lock); 649 list_for_each_entry_safe(inti, n, &li->list, list) { 650 list_del(&inti->list); 651 kfree(inti); 652 } 653 atomic_set(&li->active, 0); 654 spin_unlock_bh(&li->lock); 655 656 /* clear pending external calls set by sigp interpretation facility */ 657 atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); 658 atomic_clear_mask(SIGP_CTRL_C, 659 &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl); 660 } 661 662 void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) 663 { 664 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 665 struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int; 666 struct kvm_s390_interrupt_info *n, *inti = NULL; 667 int deliver; 668 669 __reset_intercept_indicators(vcpu); 670 if (atomic_read(&li->active)) { 671 do { 672 deliver = 0; 673 spin_lock_bh(&li->lock); 674 list_for_each_entry_safe(inti, n, &li->list, list) { 675 if (__interrupt_is_deliverable(vcpu, inti)) { 676 list_del(&inti->list); 677 deliver = 1; 678 break; 679 } 680 __set_intercept_indicator(vcpu, inti); 681 } 682 if (list_empty(&li->list)) 683 atomic_set(&li->active, 0); 684 spin_unlock_bh(&li->lock); 685 if (deliver) { 686 __do_deliver_interrupt(vcpu, inti); 687 kfree(inti); 688 } 689 } while (deliver); 690 } 691 692 if (kvm_cpu_has_pending_timer(vcpu)) 693 deliver_ckc_interrupt(vcpu); 694 695 if (atomic_read(&fi->active)) { 696 do { 697 deliver = 0; 698 spin_lock(&fi->lock); 699 list_for_each_entry_safe(inti, n, &fi->list, list) { 700 if (__interrupt_is_deliverable(vcpu, inti)) { 701 list_del(&inti->list); 702 fi->irq_count--; 703 deliver = 1; 704 break; 705 } 706 __set_intercept_indicator(vcpu, inti); 707 } 708 if (list_empty(&fi->list)) 709 atomic_set(&fi->active, 0); 710 spin_unlock(&fi->lock); 711 if (deliver) { 712 __do_deliver_interrupt(vcpu, inti); 713 kfree(inti); 714 } 715 } while (deliver); 716 } 717 } 718 719 void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu) 720 { 721 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 722 struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int; 723 struct kvm_s390_interrupt_info *n, *inti = NULL; 724 int deliver; 725 726 __reset_intercept_indicators(vcpu); 727 if (atomic_read(&li->active)) { 728 do { 729 deliver = 0; 730 spin_lock_bh(&li->lock); 731 list_for_each_entry_safe(inti, n, &li->list, list) { 732 if ((inti->type == KVM_S390_MCHK) && 733 __interrupt_is_deliverable(vcpu, inti)) { 734 list_del(&inti->list); 735 deliver = 1; 736 break; 737 } 738 __set_intercept_indicator(vcpu, inti); 739 } 740 if (list_empty(&li->list)) 741 atomic_set(&li->active, 0); 742 spin_unlock_bh(&li->lock); 743 if (deliver) { 744 __do_deliver_interrupt(vcpu, inti); 745 kfree(inti); 746 } 747 } while (deliver); 748 } 749 750 if (atomic_read(&fi->active)) { 751 do { 752 deliver = 0; 753 spin_lock(&fi->lock); 754 list_for_each_entry_safe(inti, n, &fi->list, list) { 755 if ((inti->type == KVM_S390_MCHK) && 756 __interrupt_is_deliverable(vcpu, inti)) { 757 list_del(&inti->list); 758 fi->irq_count--; 759 deliver = 1; 760 break; 761 } 762 __set_intercept_indicator(vcpu, inti); 763 } 764 if (list_empty(&fi->list)) 765 atomic_set(&fi->active, 0); 766 spin_unlock(&fi->lock); 767 if (deliver) { 768 __do_deliver_interrupt(vcpu, inti); 769 kfree(inti); 770 } 771 } while (deliver); 772 } 773 } 774 775 int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code) 776 { 777 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 778 struct kvm_s390_interrupt_info *inti; 779 780 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 781 if (!inti) 782 return -ENOMEM; 783 784 inti->type = KVM_S390_PROGRAM_INT; 785 inti->pgm.code = code; 786 787 VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code); 788 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1); 789 spin_lock_bh(&li->lock); 790 list_add(&inti->list, &li->list); 791 atomic_set(&li->active, 1); 792 BUG_ON(waitqueue_active(li->wq)); 793 spin_unlock_bh(&li->lock); 794 return 0; 795 } 796 797 int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu, 798 struct kvm_s390_pgm_info *pgm_info) 799 { 800 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; 801 struct kvm_s390_interrupt_info *inti; 802 803 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 804 if (!inti) 805 return -ENOMEM; 806 807 VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)", 808 pgm_info->code); 809 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, 810 pgm_info->code, 0, 1); 811 812 inti->type = KVM_S390_PROGRAM_INT; 813 memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm)); 814 spin_lock_bh(&li->lock); 815 list_add(&inti->list, &li->list); 816 atomic_set(&li->active, 1); 817 BUG_ON(waitqueue_active(li->wq)); 818 spin_unlock_bh(&li->lock); 819 return 0; 820 } 821 822 struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, 823 u64 cr6, u64 schid) 824 { 825 struct kvm_s390_float_interrupt *fi; 826 struct kvm_s390_interrupt_info *inti, *iter; 827 828 if ((!schid && !cr6) || (schid && cr6)) 829 return NULL; 830 mutex_lock(&kvm->lock); 831 fi = &kvm->arch.float_int; 832 spin_lock(&fi->lock); 833 inti = NULL; 834 list_for_each_entry(iter, &fi->list, list) { 835 if (!is_ioint(iter->type)) 836 continue; 837 if (cr6 && 838 ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0)) 839 continue; 840 if (schid) { 841 if (((schid & 0x00000000ffff0000) >> 16) != 842 iter->io.subchannel_id) 843 continue; 844 if ((schid & 0x000000000000ffff) != 845 iter->io.subchannel_nr) 846 continue; 847 } 848 inti = iter; 849 break; 850 } 851 if (inti) { 852 list_del_init(&inti->list); 853 fi->irq_count--; 854 } 855 if (list_empty(&fi->list)) 856 atomic_set(&fi->active, 0); 857 spin_unlock(&fi->lock); 858 mutex_unlock(&kvm->lock); 859 return inti; 860 } 861 862 static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) 863 { 864 struct kvm_s390_local_interrupt *li; 865 struct kvm_s390_float_interrupt *fi; 866 struct kvm_s390_interrupt_info *iter; 867 struct kvm_vcpu *dst_vcpu = NULL; 868 int sigcpu; 869 int rc = 0; 870 871 mutex_lock(&kvm->lock); 872 fi = &kvm->arch.float_int; 873 spin_lock(&fi->lock); 874 if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) { 875 rc = -EINVAL; 876 goto unlock_fi; 877 } 878 fi->irq_count++; 879 if (!is_ioint(inti->type)) { 880 list_add_tail(&inti->list, &fi->list); 881 } else { 882 u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word); 883 884 /* Keep I/O interrupts sorted in isc order. */ 885 list_for_each_entry(iter, &fi->list, list) { 886 if (!is_ioint(iter->type)) 887 continue; 888 if (int_word_to_isc_bits(iter->io.io_int_word) 889 <= isc_bits) 890 continue; 891 break; 892 } 893 list_add_tail(&inti->list, &iter->list); 894 } 895 atomic_set(&fi->active, 1); 896 sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS); 897 if (sigcpu == KVM_MAX_VCPUS) { 898 do { 899 sigcpu = fi->next_rr_cpu++; 900 if (sigcpu == KVM_MAX_VCPUS) 901 sigcpu = fi->next_rr_cpu = 0; 902 } while (kvm_get_vcpu(kvm, sigcpu) == NULL); 903 } 904 dst_vcpu = kvm_get_vcpu(kvm, sigcpu); 905 li = &dst_vcpu->arch.local_int; 906 spin_lock_bh(&li->lock); 907 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 908 if (waitqueue_active(li->wq)) 909 wake_up_interruptible(li->wq); 910 kvm_get_vcpu(kvm, sigcpu)->preempted = true; 911 spin_unlock_bh(&li->lock); 912 unlock_fi: 913 spin_unlock(&fi->lock); 914 mutex_unlock(&kvm->lock); 915 return rc; 916 } 917 918 int kvm_s390_inject_vm(struct kvm *kvm, 919 struct kvm_s390_interrupt *s390int) 920 { 921 struct kvm_s390_interrupt_info *inti; 922 923 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 924 if (!inti) 925 return -ENOMEM; 926 927 inti->type = s390int->type; 928 switch (inti->type) { 929 case KVM_S390_INT_VIRTIO: 930 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx", 931 s390int->parm, s390int->parm64); 932 inti->ext.ext_params = s390int->parm; 933 inti->ext.ext_params2 = s390int->parm64; 934 break; 935 case KVM_S390_INT_SERVICE: 936 VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm); 937 inti->ext.ext_params = s390int->parm; 938 break; 939 case KVM_S390_INT_PFAULT_DONE: 940 inti->type = s390int->type; 941 inti->ext.ext_params2 = s390int->parm64; 942 break; 943 case KVM_S390_MCHK: 944 VM_EVENT(kvm, 5, "inject: machine check parm64:%llx", 945 s390int->parm64); 946 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */ 947 inti->mchk.mcic = s390int->parm64; 948 break; 949 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 950 if (inti->type & IOINT_AI_MASK) 951 VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)"); 952 else 953 VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x", 954 s390int->type & IOINT_CSSID_MASK, 955 s390int->type & IOINT_SSID_MASK, 956 s390int->type & IOINT_SCHID_MASK); 957 inti->io.subchannel_id = s390int->parm >> 16; 958 inti->io.subchannel_nr = s390int->parm & 0x0000ffffu; 959 inti->io.io_int_parm = s390int->parm64 >> 32; 960 inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull; 961 break; 962 default: 963 kfree(inti); 964 return -EINVAL; 965 } 966 trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64, 967 2); 968 969 return __inject_vm(kvm, inti); 970 } 971 972 void kvm_s390_reinject_io_int(struct kvm *kvm, 973 struct kvm_s390_interrupt_info *inti) 974 { 975 __inject_vm(kvm, inti); 976 } 977 978 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, 979 struct kvm_s390_interrupt *s390int) 980 { 981 struct kvm_s390_local_interrupt *li; 982 struct kvm_s390_interrupt_info *inti; 983 984 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 985 if (!inti) 986 return -ENOMEM; 987 988 switch (s390int->type) { 989 case KVM_S390_PROGRAM_INT: 990 if (s390int->parm & 0xffff0000) { 991 kfree(inti); 992 return -EINVAL; 993 } 994 inti->type = s390int->type; 995 inti->pgm.code = s390int->parm; 996 VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)", 997 s390int->parm); 998 break; 999 case KVM_S390_SIGP_SET_PREFIX: 1000 inti->prefix.address = s390int->parm; 1001 inti->type = s390int->type; 1002 VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)", 1003 s390int->parm); 1004 break; 1005 case KVM_S390_SIGP_STOP: 1006 case KVM_S390_RESTART: 1007 case KVM_S390_INT_CLOCK_COMP: 1008 case KVM_S390_INT_CPU_TIMER: 1009 VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type); 1010 inti->type = s390int->type; 1011 break; 1012 case KVM_S390_INT_EXTERNAL_CALL: 1013 if (s390int->parm & 0xffff0000) { 1014 kfree(inti); 1015 return -EINVAL; 1016 } 1017 VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u", 1018 s390int->parm); 1019 inti->type = s390int->type; 1020 inti->extcall.code = s390int->parm; 1021 break; 1022 case KVM_S390_INT_EMERGENCY: 1023 if (s390int->parm & 0xffff0000) { 1024 kfree(inti); 1025 return -EINVAL; 1026 } 1027 VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", s390int->parm); 1028 inti->type = s390int->type; 1029 inti->emerg.code = s390int->parm; 1030 break; 1031 case KVM_S390_MCHK: 1032 VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx", 1033 s390int->parm64); 1034 inti->type = s390int->type; 1035 inti->mchk.mcic = s390int->parm64; 1036 break; 1037 case KVM_S390_INT_PFAULT_INIT: 1038 inti->type = s390int->type; 1039 inti->ext.ext_params2 = s390int->parm64; 1040 break; 1041 case KVM_S390_INT_VIRTIO: 1042 case KVM_S390_INT_SERVICE: 1043 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 1044 default: 1045 kfree(inti); 1046 return -EINVAL; 1047 } 1048 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type, s390int->parm, 1049 s390int->parm64, 2); 1050 1051 mutex_lock(&vcpu->kvm->lock); 1052 li = &vcpu->arch.local_int; 1053 spin_lock_bh(&li->lock); 1054 if (inti->type == KVM_S390_PROGRAM_INT) 1055 list_add(&inti->list, &li->list); 1056 else 1057 list_add_tail(&inti->list, &li->list); 1058 atomic_set(&li->active, 1); 1059 if (inti->type == KVM_S390_SIGP_STOP) 1060 li->action_bits |= ACTION_STOP_ON_STOP; 1061 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 1062 if (waitqueue_active(&vcpu->wq)) 1063 wake_up_interruptible(&vcpu->wq); 1064 vcpu->preempted = true; 1065 spin_unlock_bh(&li->lock); 1066 mutex_unlock(&vcpu->kvm->lock); 1067 return 0; 1068 } 1069 1070 void kvm_s390_clear_float_irqs(struct kvm *kvm) 1071 { 1072 struct kvm_s390_float_interrupt *fi; 1073 struct kvm_s390_interrupt_info *n, *inti = NULL; 1074 1075 mutex_lock(&kvm->lock); 1076 fi = &kvm->arch.float_int; 1077 spin_lock(&fi->lock); 1078 list_for_each_entry_safe(inti, n, &fi->list, list) { 1079 list_del(&inti->list); 1080 kfree(inti); 1081 } 1082 fi->irq_count = 0; 1083 atomic_set(&fi->active, 0); 1084 spin_unlock(&fi->lock); 1085 mutex_unlock(&kvm->lock); 1086 } 1087 1088 static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti, 1089 u8 *addr) 1090 { 1091 struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr; 1092 struct kvm_s390_irq irq = {0}; 1093 1094 irq.type = inti->type; 1095 switch (inti->type) { 1096 case KVM_S390_INT_PFAULT_INIT: 1097 case KVM_S390_INT_PFAULT_DONE: 1098 case KVM_S390_INT_VIRTIO: 1099 case KVM_S390_INT_SERVICE: 1100 irq.u.ext = inti->ext; 1101 break; 1102 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 1103 irq.u.io = inti->io; 1104 break; 1105 case KVM_S390_MCHK: 1106 irq.u.mchk = inti->mchk; 1107 break; 1108 default: 1109 return -EINVAL; 1110 } 1111 1112 if (copy_to_user(uptr, &irq, sizeof(irq))) 1113 return -EFAULT; 1114 1115 return 0; 1116 } 1117 1118 static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len) 1119 { 1120 struct kvm_s390_interrupt_info *inti; 1121 struct kvm_s390_float_interrupt *fi; 1122 int ret = 0; 1123 int n = 0; 1124 1125 mutex_lock(&kvm->lock); 1126 fi = &kvm->arch.float_int; 1127 spin_lock(&fi->lock); 1128 1129 list_for_each_entry(inti, &fi->list, list) { 1130 if (len < sizeof(struct kvm_s390_irq)) { 1131 /* signal userspace to try again */ 1132 ret = -ENOMEM; 1133 break; 1134 } 1135 ret = copy_irq_to_user(inti, buf); 1136 if (ret) 1137 break; 1138 buf += sizeof(struct kvm_s390_irq); 1139 len -= sizeof(struct kvm_s390_irq); 1140 n++; 1141 } 1142 1143 spin_unlock(&fi->lock); 1144 mutex_unlock(&kvm->lock); 1145 1146 return ret < 0 ? ret : n; 1147 } 1148 1149 static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) 1150 { 1151 int r; 1152 1153 switch (attr->group) { 1154 case KVM_DEV_FLIC_GET_ALL_IRQS: 1155 r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr, 1156 attr->attr); 1157 break; 1158 default: 1159 r = -EINVAL; 1160 } 1161 1162 return r; 1163 } 1164 1165 static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti, 1166 u64 addr) 1167 { 1168 struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr; 1169 void *target = NULL; 1170 void __user *source; 1171 u64 size; 1172 1173 if (get_user(inti->type, (u64 __user *)addr)) 1174 return -EFAULT; 1175 1176 switch (inti->type) { 1177 case KVM_S390_INT_PFAULT_INIT: 1178 case KVM_S390_INT_PFAULT_DONE: 1179 case KVM_S390_INT_VIRTIO: 1180 case KVM_S390_INT_SERVICE: 1181 target = (void *) &inti->ext; 1182 source = &uptr->u.ext; 1183 size = sizeof(inti->ext); 1184 break; 1185 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 1186 target = (void *) &inti->io; 1187 source = &uptr->u.io; 1188 size = sizeof(inti->io); 1189 break; 1190 case KVM_S390_MCHK: 1191 target = (void *) &inti->mchk; 1192 source = &uptr->u.mchk; 1193 size = sizeof(inti->mchk); 1194 break; 1195 default: 1196 return -EINVAL; 1197 } 1198 1199 if (copy_from_user(target, source, size)) 1200 return -EFAULT; 1201 1202 return 0; 1203 } 1204 1205 static int enqueue_floating_irq(struct kvm_device *dev, 1206 struct kvm_device_attr *attr) 1207 { 1208 struct kvm_s390_interrupt_info *inti = NULL; 1209 int r = 0; 1210 int len = attr->attr; 1211 1212 if (len % sizeof(struct kvm_s390_irq) != 0) 1213 return -EINVAL; 1214 else if (len > KVM_S390_FLIC_MAX_BUFFER) 1215 return -EINVAL; 1216 1217 while (len >= sizeof(struct kvm_s390_irq)) { 1218 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 1219 if (!inti) 1220 return -ENOMEM; 1221 1222 r = copy_irq_from_user(inti, attr->addr); 1223 if (r) { 1224 kfree(inti); 1225 return r; 1226 } 1227 r = __inject_vm(dev->kvm, inti); 1228 if (r) { 1229 kfree(inti); 1230 return r; 1231 } 1232 len -= sizeof(struct kvm_s390_irq); 1233 attr->addr += sizeof(struct kvm_s390_irq); 1234 } 1235 1236 return r; 1237 } 1238 1239 static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id) 1240 { 1241 if (id >= MAX_S390_IO_ADAPTERS) 1242 return NULL; 1243 return kvm->arch.adapters[id]; 1244 } 1245 1246 static int register_io_adapter(struct kvm_device *dev, 1247 struct kvm_device_attr *attr) 1248 { 1249 struct s390_io_adapter *adapter; 1250 struct kvm_s390_io_adapter adapter_info; 1251 1252 if (copy_from_user(&adapter_info, 1253 (void __user *)attr->addr, sizeof(adapter_info))) 1254 return -EFAULT; 1255 1256 if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) || 1257 (dev->kvm->arch.adapters[adapter_info.id] != NULL)) 1258 return -EINVAL; 1259 1260 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); 1261 if (!adapter) 1262 return -ENOMEM; 1263 1264 INIT_LIST_HEAD(&adapter->maps); 1265 init_rwsem(&adapter->maps_lock); 1266 atomic_set(&adapter->nr_maps, 0); 1267 adapter->id = adapter_info.id; 1268 adapter->isc = adapter_info.isc; 1269 adapter->maskable = adapter_info.maskable; 1270 adapter->masked = false; 1271 adapter->swap = adapter_info.swap; 1272 dev->kvm->arch.adapters[adapter->id] = adapter; 1273 1274 return 0; 1275 } 1276 1277 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked) 1278 { 1279 int ret; 1280 struct s390_io_adapter *adapter = get_io_adapter(kvm, id); 1281 1282 if (!adapter || !adapter->maskable) 1283 return -EINVAL; 1284 ret = adapter->masked; 1285 adapter->masked = masked; 1286 return ret; 1287 } 1288 1289 static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr) 1290 { 1291 struct s390_io_adapter *adapter = get_io_adapter(kvm, id); 1292 struct s390_map_info *map; 1293 int ret; 1294 1295 if (!adapter || !addr) 1296 return -EINVAL; 1297 1298 map = kzalloc(sizeof(*map), GFP_KERNEL); 1299 if (!map) { 1300 ret = -ENOMEM; 1301 goto out; 1302 } 1303 INIT_LIST_HEAD(&map->list); 1304 map->guest_addr = addr; 1305 map->addr = gmap_translate(addr, kvm->arch.gmap); 1306 if (map->addr == -EFAULT) { 1307 ret = -EFAULT; 1308 goto out; 1309 } 1310 ret = get_user_pages_fast(map->addr, 1, 1, &map->page); 1311 if (ret < 0) 1312 goto out; 1313 BUG_ON(ret != 1); 1314 down_write(&adapter->maps_lock); 1315 if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) { 1316 list_add_tail(&map->list, &adapter->maps); 1317 ret = 0; 1318 } else { 1319 put_page(map->page); 1320 ret = -EINVAL; 1321 } 1322 up_write(&adapter->maps_lock); 1323 out: 1324 if (ret) 1325 kfree(map); 1326 return ret; 1327 } 1328 1329 static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr) 1330 { 1331 struct s390_io_adapter *adapter = get_io_adapter(kvm, id); 1332 struct s390_map_info *map, *tmp; 1333 int found = 0; 1334 1335 if (!adapter || !addr) 1336 return -EINVAL; 1337 1338 down_write(&adapter->maps_lock); 1339 list_for_each_entry_safe(map, tmp, &adapter->maps, list) { 1340 if (map->guest_addr == addr) { 1341 found = 1; 1342 atomic_dec(&adapter->nr_maps); 1343 list_del(&map->list); 1344 put_page(map->page); 1345 kfree(map); 1346 break; 1347 } 1348 } 1349 up_write(&adapter->maps_lock); 1350 1351 return found ? 0 : -EINVAL; 1352 } 1353 1354 void kvm_s390_destroy_adapters(struct kvm *kvm) 1355 { 1356 int i; 1357 struct s390_map_info *map, *tmp; 1358 1359 for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) { 1360 if (!kvm->arch.adapters[i]) 1361 continue; 1362 list_for_each_entry_safe(map, tmp, 1363 &kvm->arch.adapters[i]->maps, list) { 1364 list_del(&map->list); 1365 put_page(map->page); 1366 kfree(map); 1367 } 1368 kfree(kvm->arch.adapters[i]); 1369 } 1370 } 1371 1372 static int modify_io_adapter(struct kvm_device *dev, 1373 struct kvm_device_attr *attr) 1374 { 1375 struct kvm_s390_io_adapter_req req; 1376 struct s390_io_adapter *adapter; 1377 int ret; 1378 1379 if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req))) 1380 return -EFAULT; 1381 1382 adapter = get_io_adapter(dev->kvm, req.id); 1383 if (!adapter) 1384 return -EINVAL; 1385 switch (req.type) { 1386 case KVM_S390_IO_ADAPTER_MASK: 1387 ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask); 1388 if (ret > 0) 1389 ret = 0; 1390 break; 1391 case KVM_S390_IO_ADAPTER_MAP: 1392 ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr); 1393 break; 1394 case KVM_S390_IO_ADAPTER_UNMAP: 1395 ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr); 1396 break; 1397 default: 1398 ret = -EINVAL; 1399 } 1400 1401 return ret; 1402 } 1403 1404 static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) 1405 { 1406 int r = 0; 1407 unsigned int i; 1408 struct kvm_vcpu *vcpu; 1409 1410 switch (attr->group) { 1411 case KVM_DEV_FLIC_ENQUEUE: 1412 r = enqueue_floating_irq(dev, attr); 1413 break; 1414 case KVM_DEV_FLIC_CLEAR_IRQS: 1415 r = 0; 1416 kvm_s390_clear_float_irqs(dev->kvm); 1417 break; 1418 case KVM_DEV_FLIC_APF_ENABLE: 1419 dev->kvm->arch.gmap->pfault_enabled = 1; 1420 break; 1421 case KVM_DEV_FLIC_APF_DISABLE_WAIT: 1422 dev->kvm->arch.gmap->pfault_enabled = 0; 1423 /* 1424 * Make sure no async faults are in transition when 1425 * clearing the queues. So we don't need to worry 1426 * about late coming workers. 1427 */ 1428 synchronize_srcu(&dev->kvm->srcu); 1429 kvm_for_each_vcpu(i, vcpu, dev->kvm) 1430 kvm_clear_async_pf_completion_queue(vcpu); 1431 break; 1432 case KVM_DEV_FLIC_ADAPTER_REGISTER: 1433 r = register_io_adapter(dev, attr); 1434 break; 1435 case KVM_DEV_FLIC_ADAPTER_MODIFY: 1436 r = modify_io_adapter(dev, attr); 1437 break; 1438 default: 1439 r = -EINVAL; 1440 } 1441 1442 return r; 1443 } 1444 1445 static int flic_create(struct kvm_device *dev, u32 type) 1446 { 1447 if (!dev) 1448 return -EINVAL; 1449 if (dev->kvm->arch.flic) 1450 return -EINVAL; 1451 dev->kvm->arch.flic = dev; 1452 return 0; 1453 } 1454 1455 static void flic_destroy(struct kvm_device *dev) 1456 { 1457 dev->kvm->arch.flic = NULL; 1458 kfree(dev); 1459 } 1460 1461 /* s390 floating irq controller (flic) */ 1462 struct kvm_device_ops kvm_flic_ops = { 1463 .name = "kvm-flic", 1464 .get_attr = flic_get_attr, 1465 .set_attr = flic_set_attr, 1466 .create = flic_create, 1467 .destroy = flic_destroy, 1468 }; 1469 1470 static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap) 1471 { 1472 unsigned long bit; 1473 1474 bit = bit_nr + (addr % PAGE_SIZE) * 8; 1475 1476 return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit; 1477 } 1478 1479 static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter, 1480 u64 addr) 1481 { 1482 struct s390_map_info *map; 1483 1484 if (!adapter) 1485 return NULL; 1486 1487 list_for_each_entry(map, &adapter->maps, list) { 1488 if (map->guest_addr == addr) 1489 return map; 1490 } 1491 return NULL; 1492 } 1493 1494 static int adapter_indicators_set(struct kvm *kvm, 1495 struct s390_io_adapter *adapter, 1496 struct kvm_s390_adapter_int *adapter_int) 1497 { 1498 unsigned long bit; 1499 int summary_set, idx; 1500 struct s390_map_info *info; 1501 void *map; 1502 1503 info = get_map_info(adapter, adapter_int->ind_addr); 1504 if (!info) 1505 return -1; 1506 map = page_address(info->page); 1507 bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap); 1508 set_bit(bit, map); 1509 idx = srcu_read_lock(&kvm->srcu); 1510 mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT); 1511 set_page_dirty_lock(info->page); 1512 info = get_map_info(adapter, adapter_int->summary_addr); 1513 if (!info) { 1514 srcu_read_unlock(&kvm->srcu, idx); 1515 return -1; 1516 } 1517 map = page_address(info->page); 1518 bit = get_ind_bit(info->addr, adapter_int->summary_offset, 1519 adapter->swap); 1520 summary_set = test_and_set_bit(bit, map); 1521 mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT); 1522 set_page_dirty_lock(info->page); 1523 srcu_read_unlock(&kvm->srcu, idx); 1524 return summary_set ? 0 : 1; 1525 } 1526 1527 /* 1528 * < 0 - not injected due to error 1529 * = 0 - coalesced, summary indicator already active 1530 * > 0 - injected interrupt 1531 */ 1532 static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e, 1533 struct kvm *kvm, int irq_source_id, int level, 1534 bool line_status) 1535 { 1536 int ret; 1537 struct s390_io_adapter *adapter; 1538 1539 /* We're only interested in the 0->1 transition. */ 1540 if (!level) 1541 return 0; 1542 adapter = get_io_adapter(kvm, e->adapter.adapter_id); 1543 if (!adapter) 1544 return -1; 1545 down_read(&adapter->maps_lock); 1546 ret = adapter_indicators_set(kvm, adapter, &e->adapter); 1547 up_read(&adapter->maps_lock); 1548 if ((ret > 0) && !adapter->masked) { 1549 struct kvm_s390_interrupt s390int = { 1550 .type = KVM_S390_INT_IO(1, 0, 0, 0), 1551 .parm = 0, 1552 .parm64 = (adapter->isc << 27) | 0x80000000, 1553 }; 1554 ret = kvm_s390_inject_vm(kvm, &s390int); 1555 if (ret == 0) 1556 ret = 1; 1557 } 1558 return ret; 1559 } 1560 1561 int kvm_set_routing_entry(struct kvm_irq_routing_table *rt, 1562 struct kvm_kernel_irq_routing_entry *e, 1563 const struct kvm_irq_routing_entry *ue) 1564 { 1565 int ret; 1566 1567 switch (ue->type) { 1568 case KVM_IRQ_ROUTING_S390_ADAPTER: 1569 e->set = set_adapter_int; 1570 e->adapter.summary_addr = ue->u.adapter.summary_addr; 1571 e->adapter.ind_addr = ue->u.adapter.ind_addr; 1572 e->adapter.summary_offset = ue->u.adapter.summary_offset; 1573 e->adapter.ind_offset = ue->u.adapter.ind_offset; 1574 e->adapter.adapter_id = ue->u.adapter.adapter_id; 1575 ret = 0; 1576 break; 1577 default: 1578 ret = -EINVAL; 1579 } 1580 1581 return ret; 1582 } 1583 1584 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, 1585 int irq_source_id, int level, bool line_status) 1586 { 1587 return -EINVAL; 1588 } 1589