/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008,2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define IOINT_SCHID_MASK 0x0000ffff
#define IOINT_SSID_MASK 0x00030000
#define IOINT_CSSID_MASK 0x03fc0000
#define IOINT_AI_MASK 0x04000000
#define PFAULT_INIT 0x0600

static int __must_check deliver_ckc_interrupt(struct kvm_vcpu *vcpu);

static int is_ioint(u64 type)
{
	return ((type & 0xfffe0000u) != 0xfffe0000u);
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
		return 0;
	return 1;
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	if (psw_extint_disabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
		/* No timer interrupts when single stepping */
		return 0;
	return 1;
}

static u64 int_word_to_isc_bits(u32 int_word)
{
	u8 isc = (int_word & 0x38000000) >> 27;

	return (0x80 >> isc) << 24;
}
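/*
 * Check whether the interrupt @inti can be delivered to @vcpu right now:
 * the corresponding subclass must be enabled in the guest PSW system mask
 * and, for most types, in the relevant control register (CR0, CR6 or CR14).
 */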
static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
						   struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
			return 1;
		return 0;
	case KVM_S390_INT_EMERGENCY:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
			return 1;
		return 0;
	case KVM_S390_INT_CLOCK_COMP:
		return ckc_interrupts_enabled(vcpu);
	case KVM_S390_INT_CPU_TIMER:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x400ul)
			return 1;
		return 0;
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_SIGP_SET_PREFIX:
	case KVM_S390_RESTART:
		return 1;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
			return 1;
		return 0;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[6] &
		    int_word_to_isc_bits(inti->io.io_int_word))
			return 1;
		return 0;
	default:
		printk(KERN_WARNING "illegal interrupt type %llx\n",
		       inti->type);
		BUG();
	}
	return 0;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
			  &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}

	if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP)
		atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_CLOCK_COMP:
	case KVM_S390_INT_CPU_TIMER:
		if (psw_extint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR0;
		break;
	case KVM_S390_SIGP_STOP:
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
		break;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			vcpu->arch.sie_block->ictl |= ICTL_LPSW;
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR14;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_IO_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR6;
		break;
	default:
		BUG();
	}
}

static u16 get_ilc(struct kvm_vcpu *vcpu)
{
	const unsigned short table[] = { 2, 4, 4, 6 };

	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_INST:
	case ICPT_INSTPROGI:
	case ICPT_OPEREXC:
	case ICPT_PARTEXEC:
	case ICPT_IOINST:
		/* last instruction only stored for these icptcodes */
		return table[vcpu->arch.sie_block->ipa >> 14];
	case ICPT_PROGI:
		return vcpu->arch.sie_block->pgmilc;
	default:
		return 0;
	}
}
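/*
 * Deliver a program interrupt: store the interruption parameters that
 * belong to @pgm_info->code into the guest lowcore, save the old PSW and
 * load the program-check new PSW.
 */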
static int __must_check __deliver_prog_irq(struct kvm_vcpu *vcpu,
					   struct kvm_s390_pgm_info *pgm_info)
{
	int rc = 0;
	u16 ilc = get_ilc(vcpu);

	switch (pgm_info->code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info->exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info->exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info->op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info->mon_class_nr,
				  (u64 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info->mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info->data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info->exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	}

	if (pgm_info->code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info->per_code,
				   (u8 *)__LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info->per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info->per_address,
				   (u64 *)__LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info->per_access_id,
				   (u8 *)__LC_PER_ACCESS_ID);
	}

	rc |= put_guest_lc(vcpu, ilc, (u16 *)__LC_PGM_ILC);
	rc |= put_guest_lc(vcpu, pgm_info->code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));

	return rc;
}
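/*
 * Deliver the interrupt @inti to @vcpu by writing the type-specific
 * parameters into the guest lowcore and exchanging the old and new PSWs.
 * Returns 0 on success or a negative error code if a guest access failed.
 */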
static int __must_check __do_deliver_interrupt(struct kvm_vcpu *vcpu,
					       struct kvm_s390_interrupt_info *inti)
{
	const unsigned short table[] = { 2, 4, 4, 6 };
	int rc = 0;

	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
		vcpu->stat.deliver_emergency_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->emerg.code, 0);
		rc = put_guest_lc(vcpu, 0x1201, (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, inti->emerg.code,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
		vcpu->stat.deliver_external_call++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->extcall.code, 0);
		rc = put_guest_lc(vcpu, 0x1202, (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, inti->extcall.code,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		break;
	case KVM_S390_INT_CLOCK_COMP:
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->ext.ext_params, 0);
		rc = deliver_ckc_interrupt(vcpu);
		break;
	case KVM_S390_INT_CPU_TIMER:
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->ext.ext_params, 0);
		rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
				  (u16 *)__LC_EXT_INT_CODE);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		break;
	case KVM_S390_INT_SERVICE:
		VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
			   inti->ext.ext_params);
		vcpu->stat.deliver_service_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->ext.ext_params, 0);
		rc = put_guest_lc(vcpu, 0x2401, (u16 *)__LC_EXT_INT_CODE);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		break;
	case KVM_S390_INT_PFAULT_INIT:
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
						 inti->ext.ext_params2);
		rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				  (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		break;
	case KVM_S390_INT_PFAULT_DONE:
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
						 inti->ext.ext_params2);
		rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, 0x0680, (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		break;
	case KVM_S390_INT_VIRTIO:
		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->ext.ext_params,
						 inti->ext.ext_params2);
		rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, 0x0d00, (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		break;
	case KVM_S390_SIGP_STOP:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
		vcpu->stat.deliver_stop_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 0, 0);
		__set_intercept_indicator(vcpu, inti);
		break;

	case KVM_S390_SIGP_SET_PREFIX:
		VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
			   inti->prefix.address);
		vcpu->stat.deliver_prefix_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->prefix.address, 0);
		kvm_s390_set_prefix(vcpu, inti->prefix.address);
		break;

	case KVM_S390_RESTART:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
		vcpu->stat.deliver_restart_signal++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 0, 0);
		rc = write_guest_lc(vcpu,
				    offsetof(struct _lowcore, restart_old_psw),
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		break;
	case KVM_S390_PROGRAM_INT:
		VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
			   inti->pgm.code,
			   table[vcpu->arch.sie_block->ipa >> 14]);
		vcpu->stat.deliver_program_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->pgm.code, 0);
		rc = __deliver_prog_irq(vcpu, &inti->pgm);
		break;

	case KVM_S390_MCHK:
		VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
			   inti->mchk.mcic);
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 inti->mchk.cr14,
						 inti->mchk.mcic);
		rc = kvm_s390_vcpu_store_status(vcpu,
						KVM_S390_STORE_STATUS_PREFIXED);
		rc |= put_guest_lc(vcpu, inti->mchk.mcic, (u64 *)__LC_MCCK_CODE);
		rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		break;

	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	{
		__u32 param0 = ((__u32)inti->io.subchannel_id << 16) |
			inti->io.subchannel_nr;
		__u64 param1 = ((__u64)inti->io.io_int_parm << 32) |
			inti->io.io_int_word;
		VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
		vcpu->stat.deliver_io_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
						 param0, param1);
		rc = put_guest_lc(vcpu, inti->io.subchannel_id,
				  (u16 *)__LC_SUBCHANNEL_ID);
		rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
				   (u16 *)__LC_SUBCHANNEL_NR);
		rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
				   (u32 *)__LC_IO_INT_PARM);
		rc |= put_guest_lc(vcpu, inti->io.io_int_word,
				   (u32 *)__LC_IO_INT_WORD);
		rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		break;
	}
	default:
		BUG();
	}

	return rc;
}
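/*
 * Deliver a clock comparator interrupt (external interrupt code 0x1004)
 * by exchanging the external old/new PSWs in the guest lowcore.
 */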
static int __must_check deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
	int rc;

	rc = put_guest_lc(vcpu, 0x1004, (u16 *)__LC_EXT_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc;
}

/* Check whether SIGP interpretation facility has an external call pending */
int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu)
{
	atomic_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl;

	if (!psw_extint_disabled(vcpu) &&
	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul) &&
	    (atomic_read(sigp_ctrl) & SIGP_CTRL_C) &&
	    (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
		return 1;

	return 0;
}

int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	if (atomic_read(&li->active)) {
		spin_lock(&li->lock);
		list_for_each_entry(inti, &li->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock(&li->lock);
	}

	if ((!rc) && atomic_read(&fi->active)) {
		spin_lock(&fi->lock);
		list_for_each_entry(inti, &fi->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock(&fi->lock);
	}

	if (!rc && kvm_cpu_has_pending_timer(vcpu))
		rc = 1;

	if (!rc && kvm_s390_si_ext_call_pending(vcpu))
		rc = 1;

	return rc;
}
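/*
 * The clock comparator becomes pending once the guest TOD clock
 * (host TOD + epoch) has passed the programmed comparator value and
 * clock comparator interrupts are enabled.
 */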
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.sie_block->ckc <
	      get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
		return 0;
	if (!ckc_interrupts_enabled(vcpu))
		return 0;
	return 1;
}

int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;

	vcpu->stat.exit_wait_state++;

	/* fast path */
	if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
		return 0;

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		return -EOPNOTSUPP; /* disabled wait */
	}

	__set_cpu_idle(vcpu);
	if (!ckc_interrupts_enabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		goto no_timer;
	}

	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	kvm_vcpu_block(vcpu);
	__unset_cpu_idle(vcpu);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
	return 0;
}
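/* Wake up a vcpu that is blocked in kvm_vcpu_block(). */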
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
	if (waitqueue_active(&vcpu->wq)) {
		/*
		 * The vcpu gave up the cpu voluntarily, mark it as a good
		 * yield-candidate.
		 */
		vcpu->preempted = true;
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	kvm_s390_vcpu_wakeup(vcpu);

	return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *n, *inti = NULL;

	spin_lock(&li->lock);
	list_for_each_entry_safe(inti, n, &li->list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
	atomic_set(&li->active, 0);
	spin_unlock(&li->lock);

	/* clear pending external calls set by sigp interpretation facility */
	atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
	atomic_clear_mask(SIGP_CTRL_C,
			  &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl);
}

int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *n, *inti = NULL;
	int deliver;
	int rc = 0;

	__reset_intercept_indicators(vcpu);
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock(&li->lock);
			if (deliver) {
				rc = __do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (!rc && deliver);
	}

	if (!rc && kvm_cpu_has_pending_timer(vcpu))
		rc = deliver_ckc_interrupt(vcpu);

	if (!rc && atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					fi->irq_count--;
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock(&fi->lock);
			if (deliver) {
				rc = __do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (!rc && deliver);
	}

	return rc;
}

int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_PROGRAM_INT;
	inti->pgm.code = code;

	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
	spin_lock(&li->lock);
	list_add(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	BUG_ON(waitqueue_active(li->wq));
	spin_unlock(&li->lock);
	return 0;
}
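/*
 * Inject a program interrupt with additional interruption parameters,
 * e.g. a translation exception code, into the local interrupt list.
 */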
int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
			     struct kvm_s390_pgm_info *pgm_info)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)",
		   pgm_info->code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
				   pgm_info->code, 0, 1);

	inti->type = KVM_S390_PROGRAM_INT;
	memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm));
	spin_lock(&li->lock);
	list_add(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	BUG_ON(waitqueue_active(li->wq));
	spin_unlock(&li->lock);
	return 0;
}

struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 cr6, u64 schid)
{
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti, *iter;

	if ((!schid && !cr6) || (schid && cr6))
		return NULL;
	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	inti = NULL;
	list_for_each_entry(iter, &fi->list, list) {
		if (!is_ioint(iter->type))
			continue;
		if (cr6 &&
		    ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
			continue;
		if (schid) {
			if (((schid & 0x00000000ffff0000) >> 16) !=
			    iter->io.subchannel_id)
				continue;
			if ((schid & 0x000000000000ffff) !=
			    iter->io.subchannel_nr)
				continue;
		}
		inti = iter;
		break;
	}
	if (inti) {
		list_del_init(&inti->list);
		fi->irq_count--;
	}
	if (list_empty(&fi->list))
		atomic_set(&fi->active, 0);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return inti;
}
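/*
 * Queue a floating interrupt and kick a vcpu that may deliver it.
 * I/O interrupts are kept sorted by interruption subclass; an idle vcpu
 * is preferred as delivery target, otherwise targets rotate round-robin.
 */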
static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *iter;
	struct kvm_vcpu *dst_vcpu = NULL;
	int sigcpu;
	int rc = 0;

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
		rc = -EINVAL;
		goto unlock_fi;
	}
	fi->irq_count++;
	if (!is_ioint(inti->type)) {
		list_add_tail(&inti->list, &fi->list);
	} else {
		u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);

		/* Keep I/O interrupts sorted in isc order. */
		list_for_each_entry(iter, &fi->list, list) {
			if (!is_ioint(iter->type))
				continue;
			if (int_word_to_isc_bits(iter->io.io_int_word)
			    <= isc_bits)
				continue;
			break;
		}
		list_add_tail(&inti->list, &iter->list);
	}
	atomic_set(&fi->active, 1);
	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
	if (sigcpu == KVM_MAX_VCPUS) {
		do {
			sigcpu = fi->next_rr_cpu++;
			if (sigcpu == KVM_MAX_VCPUS)
				sigcpu = fi->next_rr_cpu = 0;
		} while (kvm_get_vcpu(kvm, sigcpu) == NULL);
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
	li = &dst_vcpu->arch.local_int;
	spin_lock(&li->lock);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	spin_unlock(&li->lock);
	kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
unlock_fi:
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return rc;
}

int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = s390int->type;
	switch (inti->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_INT_PFAULT_DONE:
		inti->type = s390int->type;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_MCHK:
		VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
			 s390int->parm64);
		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (inti->type & IOINT_AI_MASK)
			VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
		else
			VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
				 s390int->type & IOINT_CSSID_MASK,
				 s390int->type & IOINT_SSID_MASK,
				 s390int->type & IOINT_SCHID_MASK);
		inti->io.subchannel_id = s390int->parm >> 16;
		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
		inti->io.io_int_parm = s390int->parm64 >> 32;
		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
		break;
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);

	return __inject_vm(kvm, inti);
}

void kvm_s390_reinject_io_int(struct kvm *kvm,
			      struct kvm_s390_interrupt_info *inti)
{
	__inject_vm(kvm, inti);
}
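/*
 * Inject a cpu-local interrupt as requested by userspace. Program
 * interrupts are queued at the head of the list so that they are
 * delivered before any other pending local interrupt.
 */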
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
			 struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		inti->type = s390int->type;
		inti->pgm.code = s390int->parm;
		VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		inti->prefix.address = s390int->parm;
		inti->type = s390int->type;
		VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_STOP:
	case KVM_S390_RESTART:
	case KVM_S390_INT_CLOCK_COMP:
	case KVM_S390_INT_CPU_TIMER:
		VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
		inti->type = s390int->type;
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
			   s390int->parm);
		inti->type = s390int->type;
		inti->extcall.code = s390int->parm;
		break;
	case KVM_S390_INT_EMERGENCY:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", s390int->parm);
		inti->type = s390int->type;
		inti->emerg.code = s390int->parm;
		break;
	case KVM_S390_MCHK:
		VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
			   s390int->parm64);
		inti->type = s390int->type;
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_PFAULT_INIT:
		inti->type = s390int->type;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type, s390int->parm,
				   s390int->parm64, 2);

	li = &vcpu->arch.local_int;
	spin_lock(&li->lock);
	if (inti->type == KVM_S390_PROGRAM_INT)
		list_add(&inti->list, &li->list);
	else
		list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (inti->type == KVM_S390_SIGP_STOP)
		li->action_bits |= ACTION_STOP_ON_STOP;
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	spin_unlock(&li->lock);
	kvm_s390_vcpu_wakeup(vcpu);
	return 0;
}

void kvm_s390_clear_float_irqs(struct kvm *kvm)
{
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *n, *inti = NULL;

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	list_for_each_entry_safe(inti, n, &fi->list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
	fi->irq_count = 0;
	atomic_set(&fi->active, 0);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
}

static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
				   u8 *addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	struct kvm_s390_irq irq = {0};

	irq.type = inti->type;
	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		irq.u.ext = inti->ext;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		irq.u.io = inti->io;
		break;
	case KVM_S390_MCHK:
		irq.u.mchk = inti->mchk;
		break;
	default:
		return -EINVAL;
	}

	if (copy_to_user(uptr, &irq, sizeof(irq)))
		return -EFAULT;

	return 0;
}
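/*
 * Copy all pending floating interrupts to the userspace buffer @buf.
 * Returns the number of interrupts copied, or -ENOMEM if @len is too
 * small to hold them all.
 */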
static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
{
	struct kvm_s390_interrupt_info *inti;
	struct kvm_s390_float_interrupt *fi;
	int ret = 0;
	int n = 0;

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);

	list_for_each_entry(inti, &fi->list, list) {
		if (len < sizeof(struct kvm_s390_irq)) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			break;
		}
		ret = copy_irq_to_user(inti, buf);
		if (ret)
			break;
		buf += sizeof(struct kvm_s390_irq);
		len -= sizeof(struct kvm_s390_irq);
		n++;
	}

	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);

	return ret < 0 ? ret : n;
}

static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
		r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
					  attr->attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
				     u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	void *target = NULL;
	void __user *source;
	u64 size;

	if (get_user(inti->type, (u64 __user *)addr))
		return -EFAULT;

	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		target = (void *) &inti->ext;
		source = &uptr->u.ext;
		size = sizeof(inti->ext);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		target = (void *) &inti->io;
		source = &uptr->u.io;
		size = sizeof(inti->io);
		break;
	case KVM_S390_MCHK:
		target = (void *) &inti->mchk;
		source = &uptr->u.mchk;
		size = sizeof(inti->mchk);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(target, source, size))
		return -EFAULT;

	return 0;
}

static int enqueue_floating_irq(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int r = 0;
	int len = attr->attr;

	if (len % sizeof(struct kvm_s390_irq) != 0)
		return -EINVAL;
	else if (len > KVM_S390_FLIC_MAX_BUFFER)
		return -EINVAL;

	while (len >= sizeof(struct kvm_s390_irq)) {
		inti = kzalloc(sizeof(*inti), GFP_KERNEL);
		if (!inti)
			return -ENOMEM;

		r = copy_irq_from_user(inti, attr->addr);
		if (r) {
			kfree(inti);
			return r;
		}
		r = __inject_vm(dev->kvm, inti);
		if (r) {
			kfree(inti);
			return r;
		}
		len -= sizeof(struct kvm_s390_irq);
		attr->addr += sizeof(struct kvm_s390_irq);
	}

	return r;
}

static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
	if (id >= MAX_S390_IO_ADAPTERS)
		return NULL;
	return kvm->arch.adapters[id];
}

static int register_io_adapter(struct kvm_device *dev,
			       struct kvm_device_attr *attr)
{
	struct s390_io_adapter *adapter;
	struct kvm_s390_io_adapter adapter_info;

	if (copy_from_user(&adapter_info,
			   (void __user *)attr->addr, sizeof(adapter_info)))
		return -EFAULT;

	if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
	    (dev->kvm->arch.adapters[adapter_info.id] != NULL))
		return -EINVAL;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	INIT_LIST_HEAD(&adapter->maps);
	init_rwsem(&adapter->maps_lock);
	atomic_set(&adapter->nr_maps, 0);
	adapter->id = adapter_info.id;
	adapter->isc = adapter_info.isc;
	adapter->maskable = adapter_info.maskable;
	adapter->masked = false;
	adapter->swap = adapter_info.swap;
	dev->kvm->arch.adapters[adapter->id] = adapter;

	return 0;
}
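/*
 * Mask or unmask the adapter @id. Returns the previous mask state, or
 * -EINVAL if the adapter does not exist or is not maskable.
 */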
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
	int ret;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter || !adapter->maskable)
		return -EINVAL;
	ret = adapter->masked;
	adapter->masked = masked;
	return ret;
}

static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map;
	int ret;

	if (!adapter || !addr)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&map->list);
	map->guest_addr = addr;
	map->addr = gmap_translate(kvm->arch.gmap, addr);
	if (map->addr == -EFAULT) {
		ret = -EFAULT;
		goto out;
	}
	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
	if (ret < 0)
		goto out;
	BUG_ON(ret != 1);
	down_write(&adapter->maps_lock);
	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
		list_add_tail(&map->list, &adapter->maps);
		ret = 0;
	} else {
		put_page(map->page);
		ret = -EINVAL;
	}
	up_write(&adapter->maps_lock);
out:
	if (ret)
		kfree(map);
	return ret;
}

static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map, *tmp;
	int found = 0;

	if (!adapter || !addr)
		return -EINVAL;

	down_write(&adapter->maps_lock);
	list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
		if (map->guest_addr == addr) {
			found = 1;
			atomic_dec(&adapter->nr_maps);
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
			break;
		}
	}
	up_write(&adapter->maps_lock);

	return found ? 0 : -EINVAL;
}
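/* Free all registered adapters and release their pinned indicator pages. */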
void kvm_s390_destroy_adapters(struct kvm *kvm)
{
	int i;
	struct s390_map_info *map, *tmp;

	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
		if (!kvm->arch.adapters[i])
			continue;
		list_for_each_entry_safe(map, tmp,
					 &kvm->arch.adapters[i]->maps, list) {
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
		}
		kfree(kvm->arch.adapters[i]);
	}
}

static int modify_io_adapter(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct kvm_s390_io_adapter_req req;
	struct s390_io_adapter *adapter;
	int ret;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	adapter = get_io_adapter(dev->kvm, req.id);
	if (!adapter)
		return -EINVAL;
	switch (req.type) {
	case KVM_S390_IO_ADAPTER_MASK:
		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
		if (ret > 0)
			ret = 0;
		break;
	case KVM_S390_IO_ADAPTER_MAP:
		ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
		break;
	case KVM_S390_IO_ADAPTER_UNMAP:
		ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;
	unsigned int i;
	struct kvm_vcpu *vcpu;

	switch (attr->group) {
	case KVM_DEV_FLIC_ENQUEUE:
		r = enqueue_floating_irq(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IRQS:
		kvm_s390_clear_float_irqs(dev->kvm);
		break;
	case KVM_DEV_FLIC_APF_ENABLE:
		dev->kvm->arch.gmap->pfault_enabled = 1;
		break;
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
		dev->kvm->arch.gmap->pfault_enabled = 0;
		/*
		 * Make sure no async faults are in transition when
		 * clearing the queues. So we don't need to worry
		 * about late coming workers.
		 */
		synchronize_srcu(&dev->kvm->srcu);
		kvm_for_each_vcpu(i, vcpu, dev->kvm)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
		r = register_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
		r = modify_io_adapter(dev, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static int flic_create(struct kvm_device *dev, u32 type)
{
	if (!dev)
		return -EINVAL;
	if (dev->kvm->arch.flic)
		return -EINVAL;
	dev->kvm->arch.flic = dev;
	return 0;
}

static void flic_destroy(struct kvm_device *dev)
{
	dev->kvm->arch.flic = NULL;
	kfree(dev);
}

/* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,
	.set_attr = flic_set_attr,
	.create = flic_create,
	.destroy = flic_destroy,
};

static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
{
	unsigned long bit;

	bit = bit_nr + (addr % PAGE_SIZE) * 8;

	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}
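/* Find the pinned page that backs the guest indicator address @addr. */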
static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
					  u64 addr)
{
	struct s390_map_info *map;

	if (!adapter)
		return NULL;

	list_for_each_entry(map, &adapter->maps, list) {
		if (map->guest_addr == addr)
			return map;
	}
	return NULL;
}

static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct s390_map_info *info;
	void *map;

	info = get_map_info(adapter, adapter_int->ind_addr);
	if (!info)
		return -1;
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	idx = srcu_read_lock(&kvm->srcu);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	info = get_map_info(adapter, adapter_int->summary_addr);
	if (!info) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -1;
	}
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
			  adapter->swap);
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	srcu_read_unlock(&kvm->srcu, idx);
	return summary_set ? 0 : 1;
}

/*
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	int ret;
	struct s390_io_adapter *adapter;

	/* We're only interested in the 0->1 transition. */
	if (!level)
		return 0;
	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
	if (!adapter)
		return -1;
	down_read(&adapter->maps_lock);
	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
	up_read(&adapter->maps_lock);
	if ((ret > 0) && !adapter->masked) {
		struct kvm_s390_interrupt s390int = {
			.type = KVM_S390_INT_IO(1, 0, 0, 0),
			.parm = 0,
			.parm64 = (adapter->isc << 27) | 0x80000000,
		};
		ret = kvm_s390_inject_vm(kvm, &s390int);
		if (ret == 0)
			ret = 1;
	}
	return ret;
}

int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int ret;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_S390_ADAPTER:
		e->set = set_adapter_int;
		e->adapter.summary_addr = ue->u.adapter.summary_addr;
		e->adapter.ind_addr = ue->u.adapter.ind_addr;
		e->adapter.summary_offset = ue->u.adapter.summary_offset;
		e->adapter.ind_offset = ue->u.adapter.ind_offset;
		e->adapter.adapter_id = ue->u.adapter.adapter_id;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	return -EINVAL;
}