/*
 * QEMU S390x KVM implementation
 *
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright IBM Corp. 2012
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>
#include <asm/ptrace.h>

#include "cpu.h"
#include "s390x-internal.h"
#include "kvm_s390x.h"
#include "sysemu/kvm_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "qemu/units.h"
#include "qemu/main-loop.h"
#include "qemu/mmap-alloc.h"
#include "qemu/log.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "sysemu/runstate.h"
#include "sysemu/device_tree.h"
#include "exec/gdbstub.h"
#include "exec/ram_addr.h"
#include "trace.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/ipl.h"
#include "hw/s390x/ebcdic.h"
#include "exec/memattrs.h"
#include "hw/s390x/s390-virtio-ccw.h"
#include "hw/s390x/s390-virtio-hcall.h"
#include "target/s390x/kvm/pv.h"

#define kvm_vm_check_mem_attr(s, attr) \
    kvm_vm_check_attr(s, KVM_S390_VM_MEM_CTRL, attr)

#define IPA0_DIAG                       0x8300
#define IPA0_SIGP                       0xae00
#define IPA0_B2                         0xb200
#define IPA0_B9                         0xb900
#define IPA0_EB                         0xeb00
#define IPA0_E3                         0xe300

#define PRIV_B2_SCLP_CALL               0x20
#define PRIV_B2_CSCH                    0x30
#define PRIV_B2_HSCH                    0x31
#define PRIV_B2_MSCH                    0x32
#define PRIV_B2_SSCH                    0x33
#define PRIV_B2_STSCH                   0x34
#define PRIV_B2_TSCH                    0x35
#define PRIV_B2_TPI                     0x36
#define PRIV_B2_SAL                     0x37
#define PRIV_B2_RSCH                    0x38
#define PRIV_B2_STCRW                   0x39
#define PRIV_B2_STCPS                   0x3a
#define PRIV_B2_RCHP                    0x3b
#define PRIV_B2_SCHM                    0x3c
#define PRIV_B2_CHSC                    0x5f
#define PRIV_B2_SIGA                    0x74
#define PRIV_B2_XSCH                    0x76

#define PRIV_EB_SQBS                    0x8a
#define PRIV_EB_PCISTB                  0xd0
#define PRIV_EB_SIC                     0xd1

#define PRIV_B9_EQBS                    0x9c
#define PRIV_B9_CLP                     0xa0
#define PRIV_B9_PCISTG                  0xd0
#define PRIV_B9_PCILG                   0xd2
#define PRIV_B9_RPCIT                   0xd3

#define PRIV_E3_MPCIFC                  0xd0
#define PRIV_E3_STPCIFC                 0xd4

#define DIAG_TIMEREVENT                 0x288
#define DIAG_IPL                        0x308
#define DIAG_SET_CONTROL_PROGRAM_CODES  0x318
#define DIAG_KVM_HYPERCALL              0x500
#define DIAG_KVM_BREAKPOINT             0x501

#define ICPT_INSTRUCTION                0x04
#define ICPT_PROGRAM                    0x08
#define ICPT_EXT_INT                    0x14
#define ICPT_WAITPSW                    0x1c
#define ICPT_SOFT_INTERCEPT             0x24
#define ICPT_CPU_STOP                   0x28
#define ICPT_OPEREXC                    0x2c
#define ICPT_IO                         0x40
#define ICPT_PV_INSTR                   0x68
#define ICPT_PV_INSTR_NOTIFICATION      0x6c

#define NR_LOCAL_IRQS 32
/*
 * Needs to be big enough to contain max_cpus emergency signals
 * and in addition NR_LOCAL_IRQS interrupts
 */
#define VCPU_IRQ_BUF_SIZE(max_cpus) (sizeof(struct kvm_s390_irq) * \
                                     (max_cpus + NR_LOCAL_IRQS))
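/*
 * Worked example (illustrative only; the exact size depends on the kernel
 * headers): assuming sizeof(struct kvm_s390_irq) == 72 (an 8-byte type
 * field plus a 64-byte payload union), a machine with max_cpus == 8 needs
 * room for 8 + 32 = 40 interrupts, i.e. VCPU_IRQ_BUF_SIZE(8) == 2880
 * bytes: one emergency-signal slot per possible CPU plus the
 * NR_LOCAL_IRQS local interrupt slots.
 */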
/*
 * KVM only supports memory slots up to KVM_MEM_MAX_NR_PAGES pages,
 * as the dirty bitmap must be managed by bitops that take an int as
 * position indicator. This would end at an unaligned address
 * (0x7fffff00000). As future variants might provide larger pages
 * and to make all addresses properly aligned, let us split at 4TB.
 */
#define KVM_SLOT_MAX_BYTES (4UL * TiB)

static CPUWatchpoint hw_watchpoint;
/*
 * We don't use a list because this structure is also used to transmit the
 * hardware breakpoints to the kernel.
 */
static struct kvm_hw_breakpoint *hw_breakpoints;
static int nb_hw_breakpoints;

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static int cap_sync_regs;
static int cap_async_pf;
static int cap_mem_op;
static int cap_mem_op_extension;
static int cap_s390_irq;
static int cap_ri;
static int cap_hpage_1m;
static int cap_vcpu_resets;
static int cap_protected;
static int cap_zpci_op;
static int cap_protected_dump;

static bool mem_op_storage_key_support;

static int active_cmma;

static int kvm_s390_query_mem_limit(uint64_t *memory_limit)
{
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr = (uint64_t) memory_limit,
    };

    return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
}

int kvm_s390_set_mem_limit(uint64_t new_limit, uint64_t *hw_limit)
{
    int rc;

    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr = (uint64_t) &new_limit,
    };

    if (!kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_LIMIT_SIZE)) {
        return 0;
    }

    rc = kvm_s390_query_mem_limit(hw_limit);
    if (rc) {
        return rc;
    } else if (*hw_limit < new_limit) {
        return -E2BIG;
    }

    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}
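/*
 * Minimal usage sketch for the helper above (hypothetical caller, for
 * illustration only):
 *
 *     uint64_t hw_limit;
 *     int rc = kvm_s390_set_mem_limit(16 * GiB, &hw_limit);
 *     if (rc == -E2BIG) {
 *         error_report("host supports only %" PRIu64 " bytes", hw_limit);
 *     }
 *
 * A zero return means either that the limit was applied, or that the
 * kernel does not expose KVM_S390_VM_MEM_LIMIT_SIZE at all, in which case
 * no limit is enforced.
 */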
error_report("Failed to set crypto device attribute %lu: %s", 254 attr, strerror(-ret)); 255 } 256 } 257 258 static void kvm_s390_init_aes_kw(void) 259 { 260 uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_AES_KW; 261 262 if (object_property_get_bool(OBJECT(qdev_get_machine()), "aes-key-wrap", 263 NULL)) { 264 attr = KVM_S390_VM_CRYPTO_ENABLE_AES_KW; 265 } 266 267 if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) { 268 kvm_s390_set_crypto_attr(attr); 269 } 270 } 271 272 static void kvm_s390_init_dea_kw(void) 273 { 274 uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_DEA_KW; 275 276 if (object_property_get_bool(OBJECT(qdev_get_machine()), "dea-key-wrap", 277 NULL)) { 278 attr = KVM_S390_VM_CRYPTO_ENABLE_DEA_KW; 279 } 280 281 if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) { 282 kvm_s390_set_crypto_attr(attr); 283 } 284 } 285 286 void kvm_s390_crypto_reset(void) 287 { 288 if (s390_has_feat(S390_FEAT_MSA_EXT_3)) { 289 kvm_s390_init_aes_kw(); 290 kvm_s390_init_dea_kw(); 291 } 292 } 293 294 void kvm_s390_set_max_pagesize(uint64_t pagesize, Error **errp) 295 { 296 if (pagesize == 4 * KiB) { 297 return; 298 } 299 300 if (!hpage_1m_allowed()) { 301 error_setg(errp, "This QEMU machine does not support huge page " 302 "mappings"); 303 return; 304 } 305 306 if (pagesize != 1 * MiB) { 307 error_setg(errp, "Memory backing with 2G pages was specified, " 308 "but KVM does not support this memory backing"); 309 return; 310 } 311 312 if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_HPAGE_1M, 0)) { 313 error_setg(errp, "Memory backing with 1M pages was specified, " 314 "but KVM does not support this memory backing"); 315 return; 316 } 317 318 cap_hpage_1m = 1; 319 } 320 321 int kvm_s390_get_hpage_1m(void) 322 { 323 return cap_hpage_1m; 324 } 325 326 static void ccw_machine_class_foreach(ObjectClass *oc, void *opaque) 327 { 328 MachineClass *mc = MACHINE_CLASS(oc); 329 330 mc->default_cpu_type = S390_CPU_TYPE_NAME("host"); 331 } 332 333 int kvm_arch_get_default_type(MachineState *ms) 334 { 335 return 0; 336 } 337 338 int kvm_arch_init(MachineState *ms, KVMState *s) 339 { 340 object_class_foreach(ccw_machine_class_foreach, TYPE_S390_CCW_MACHINE, 341 false, NULL); 342 343 if (!kvm_check_extension(kvm_state, KVM_CAP_DEVICE_CTRL)) { 344 error_report("KVM is missing capability KVM_CAP_DEVICE_CTRL - " 345 "please use kernel 3.15 or newer"); 346 return -1; 347 } 348 if (!kvm_check_extension(s, KVM_CAP_S390_COW)) { 349 error_report("KVM is missing capability KVM_CAP_S390_COW - " 350 "unsupported environment"); 351 return -1; 352 } 353 354 cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS); 355 cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF); 356 cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP); 357 cap_mem_op_extension = kvm_check_extension(s, KVM_CAP_S390_MEM_OP_EXTENSION); 358 mem_op_storage_key_support = cap_mem_op_extension > 0; 359 cap_s390_irq = kvm_check_extension(s, KVM_CAP_S390_INJECT_IRQ); 360 cap_vcpu_resets = kvm_check_extension(s, KVM_CAP_S390_VCPU_RESETS); 361 cap_protected = kvm_check_extension(s, KVM_CAP_S390_PROTECTED); 362 cap_zpci_op = kvm_check_extension(s, KVM_CAP_S390_ZPCI_OP); 363 cap_protected_dump = kvm_check_extension(s, KVM_CAP_S390_PROTECTED_DUMP); 364 365 kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0); 366 kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0); 367 kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0); 368 if (ri_allowed()) { 369 if (kvm_vm_enable_cap(s, KVM_CAP_S390_RI, 0) == 0) { 370 cap_ri = 1; 371 } 372 } 373 if (cpu_model_allowed()) { 374 
int kvm_arch_init(MachineState *ms, KVMState *s)
{
    object_class_foreach(ccw_machine_class_foreach, TYPE_S390_CCW_MACHINE,
                         false, NULL);

    if (!kvm_check_extension(kvm_state, KVM_CAP_DEVICE_CTRL)) {
        error_report("KVM is missing capability KVM_CAP_DEVICE_CTRL - "
                     "please use kernel 3.15 or newer");
        return -1;
    }
    if (!kvm_check_extension(s, KVM_CAP_S390_COW)) {
        error_report("KVM is missing capability KVM_CAP_S390_COW - "
                     "unsupported environment");
        return -1;
    }

    cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
    cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);
    cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP);
    cap_mem_op_extension = kvm_check_extension(s, KVM_CAP_S390_MEM_OP_EXTENSION);
    mem_op_storage_key_support = cap_mem_op_extension > 0;
    cap_s390_irq = kvm_check_extension(s, KVM_CAP_S390_INJECT_IRQ);
    cap_vcpu_resets = kvm_check_extension(s, KVM_CAP_S390_VCPU_RESETS);
    cap_protected = kvm_check_extension(s, KVM_CAP_S390_PROTECTED);
    cap_zpci_op = kvm_check_extension(s, KVM_CAP_S390_ZPCI_OP);
    cap_protected_dump = kvm_check_extension(s, KVM_CAP_S390_PROTECTED_DUMP);

    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0);
    kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0);
    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0);
    if (ri_allowed()) {
        if (kvm_vm_enable_cap(s, KVM_CAP_S390_RI, 0) == 0) {
            cap_ri = 1;
        }
    }
    if (cpu_model_allowed()) {
        kvm_vm_enable_cap(s, KVM_CAP_S390_GS, 0);
    }

    /*
     * The migration interface for ais was introduced with kernel 4.13
     * but the capability itself had been active since 4.12. As migration
     * support is considered necessary, we only try to enable this for
     * newer machine types if KVM_CAP_S390_AIS_MIGRATION is available.
     */
    if (cpu_model_allowed() && kvm_kernel_irqchip_allowed() &&
        kvm_check_extension(s, KVM_CAP_S390_AIS_MIGRATION)) {
        kvm_vm_enable_cap(s, KVM_CAP_S390_AIS, 0);
    }

    kvm_set_max_memslot_size(KVM_SLOT_MAX_BYTES);
    return 0;
}

int kvm_arch_irqchip_create(KVMState *s)
{
    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    unsigned int max_cpus = MACHINE(qdev_get_machine())->smp.max_cpus;
    S390CPU *cpu = S390_CPU(cs);
    kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
    cpu->irqstate = g_malloc0(VCPU_IRQ_BUF_SIZE(max_cpus));
    return 0;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);

    g_free(cpu->irqstate);
    cpu->irqstate = NULL;

    return 0;
}

static void kvm_s390_reset_vcpu(S390CPU *cpu, unsigned long type)
{
    CPUState *cs = CPU(cpu);

    /*
     * The reset call is needed here to reset in-kernel vcpu data that
     * we can't access directly from QEMU (i.e. with older kernels
     * which don't support sync_regs/ONE_REG). Before this ioctl
     * cpu_synchronize_state() is called in common kvm code
     * (kvm-all).
     */
    if (kvm_vcpu_ioctl(cs, type)) {
        error_report("CPU reset failed on CPU %i type %lx",
                     cs->cpu_index, type);
    }
}

void kvm_s390_reset_vcpu_initial(S390CPU *cpu)
{
    kvm_s390_reset_vcpu(cpu, KVM_S390_INITIAL_RESET);
}

void kvm_s390_reset_vcpu_clear(S390CPU *cpu)
{
    if (cap_vcpu_resets) {
        kvm_s390_reset_vcpu(cpu, KVM_S390_CLEAR_RESET);
    } else {
        kvm_s390_reset_vcpu(cpu, KVM_S390_INITIAL_RESET);
    }
}

void kvm_s390_reset_vcpu_normal(S390CPU *cpu)
{
    if (cap_vcpu_resets) {
        kvm_s390_reset_vcpu(cpu, KVM_S390_NORMAL_RESET);
    }
}

static int can_sync_regs(CPUState *cs, int regs)
{
    return cap_sync_regs && (cs->kvm_run->kvm_valid_regs & regs) == regs;
}
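/*
 * The register transfer below always follows one pattern: if the kernel
 * advertises a register block in kvm_run->kvm_valid_regs, read/write it
 * directly in the shared kvm_run mapping (cheap, no extra syscall) and
 * flag it in kvm_dirty_regs; otherwise fall back to the KVM_GET/SET_*
 * vcpu ioctls. A hedged sketch of the fast path, assuming a hypothetical
 * block FOO with member foo:
 *
 *     if (can_sync_regs(cs, KVM_SYNC_FOO)) {
 *         cs->kvm_run->s.regs.foo = env->foo;
 *         cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_FOO;
 *     } else {
 *         // one KVM_SET_* or KVM_SET_ONE_REG ioctl per block
 *     }
 */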
int kvm_arch_put_registers(CPUState *cs, int level)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu = {};
    int r;
    int i;

    /* always save the PSW and the GPRS */
    cs->kvm_run->psw_addr = env->psw.addr;
    cs->kvm_run->psw_mask = env->psw.mask;

    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.gprs[i] = env->regs[i];
            cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
        }
    } else {
        for (i = 0; i < 16; i++) {
            regs.gprs[i] = env->regs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
        if (r < 0) {
            return r;
        }
    }

    if (can_sync_regs(cs, KVM_SYNC_VRS)) {
        for (i = 0; i < 32; i++) {
            cs->kvm_run->s.regs.vrs[i][0] = env->vregs[i][0];
            cs->kvm_run->s.regs.vrs[i][1] = env->vregs[i][1];
        }
        cs->kvm_run->s.regs.fpc = env->fpc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_VRS;
    } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.fprs[i] = *get_freg(env, i);
        }
        cs->kvm_run->s.regs.fpc = env->fpc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_FPRS;
    } else {
        /* Floating point */
        for (i = 0; i < 16; i++) {
            fpu.fprs[i] = *get_freg(env, i);
        }
        fpu.fpc = env->fpc;

        r = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
        if (r < 0) {
            return r;
        }
    }

    /* Do we need to save more than that? */
    if (level == KVM_PUT_RUNTIME_STATE) {
        return 0;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        cs->kvm_run->s.regs.cputm = env->cputm;
        cs->kvm_run->s.regs.ckc = env->ckc;
        cs->kvm_run->s.regs.todpr = env->todpr;
        cs->kvm_run->s.regs.gbea = env->gbea;
        cs->kvm_run->s.regs.pp = env->pp;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ARCH0;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
        memcpy(cs->kvm_run->s.regs.riccb, env->riccb, 64);
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_RICCB;
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        cs->kvm_run->s.regs.pft = env->pfault_token;
        cs->kvm_run->s.regs.pfs = env->pfault_select;
        cs->kvm_run->s.regs.pfc = env->pfault_compare;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PFAULT;
    } else if (cap_async_pf) {
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    /* access registers and control registers */
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
            cs->kvm_run->s.regs.crs[i] = env->cregs[i];
        }
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS;
    } else {
        for (i = 0; i < 16; i++) {
            sregs.acrs[i] = env->aregs[i];
            sregs.crs[i] = env->cregs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
    }

    if (can_sync_regs(cs, KVM_SYNC_GSCB)) {
        memcpy(cs->kvm_run->s.regs.gscb, env->gscb, 32);
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GSCB;
    }

    if (can_sync_regs(cs, KVM_SYNC_BPBC)) {
        cs->kvm_run->s.regs.bpbc = env->bpbc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_BPBC;
    }

    if (can_sync_regs(cs, KVM_SYNC_ETOKEN)) {
        cs->kvm_run->s.regs.etoken = env->etoken;
        cs->kvm_run->s.regs.etoken_extension = env->etoken_extension;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ETOKEN;
    }

    if (can_sync_regs(cs, KVM_SYNC_DIAG318)) {
        cs->kvm_run->s.regs.diag318 = env->diag318_info;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_DIAG318;
    }

    /* Finally the prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        cs->kvm_run->s.regs.prefix = env->psa;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
    } else {
        /* prefix is only supported via sync regs */
    }
    return 0;
}
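/*
 * Note on 'level': common KVM code calls kvm_arch_put_registers() with
 * KVM_PUT_RUNTIME_STATE whenever dirty state must be written back before
 * re-entering the guest, so the function above bails out early after the
 * cheap, frequently changing state (PSW, GPRs, FP/VRs). The remaining
 * blocks are only transferred for the heavier KVM_PUT_RESET_STATE and
 * KVM_PUT_FULL_STATE levels used around resets and migration.
 */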
int kvm_arch_get_registers(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu;
    int i, r;

    /* get the PSW */
    env->psw.addr = cs->kvm_run->psw_addr;
    env->psw.mask = cs->kvm_run->psw_mask;

    /* the GPRS */
    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            env->regs[i] = cs->kvm_run->s.regs.gprs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->regs[i] = regs.gprs[i];
        }
    }

    /* The ACRS and CRS */
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
            env->cregs[i] = cs->kvm_run->s.regs.crs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->aregs[i] = sregs.acrs[i];
            env->cregs[i] = sregs.crs[i];
        }
    }

    /* Floating point and vector registers */
    if (can_sync_regs(cs, KVM_SYNC_VRS)) {
        for (i = 0; i < 32; i++) {
            env->vregs[i][0] = cs->kvm_run->s.regs.vrs[i][0];
            env->vregs[i][1] = cs->kvm_run->s.regs.vrs[i][1];
        }
        env->fpc = cs->kvm_run->s.regs.fpc;
    } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
        for (i = 0; i < 16; i++) {
            *get_freg(env, i) = cs->kvm_run->s.regs.fprs[i];
        }
        env->fpc = cs->kvm_run->s.regs.fpc;
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            *get_freg(env, i) = fpu.fprs[i];
        }
        env->fpc = fpu.fpc;
    }

    /* The prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        env->psa = cs->kvm_run->s.regs.prefix;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        env->cputm = cs->kvm_run->s.regs.cputm;
        env->ckc = cs->kvm_run->s.regs.ckc;
        env->todpr = cs->kvm_run->s.regs.todpr;
        env->gbea = cs->kvm_run->s.regs.gbea;
        env->pp = cs->kvm_run->s.regs.pp;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
        memcpy(env->riccb, cs->kvm_run->s.regs.riccb, 64);
    }

    if (can_sync_regs(cs, KVM_SYNC_GSCB)) {
        memcpy(env->gscb, cs->kvm_run->s.regs.gscb, 32);
    }

    if (can_sync_regs(cs, KVM_SYNC_BPBC)) {
        env->bpbc = cs->kvm_run->s.regs.bpbc;
    }

    if (can_sync_regs(cs, KVM_SYNC_ETOKEN)) {
        env->etoken = cs->kvm_run->s.regs.etoken;
        env->etoken_extension = cs->kvm_run->s.regs.etoken_extension;
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        env->pfault_token = cs->kvm_run->s.regs.pft;
        env->pfault_select = cs->kvm_run->s.regs.pfs;
        env->pfault_compare = cs->kvm_run->s.regs.pfc;
    } else if (cap_async_pf) {
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    if (can_sync_regs(cs, KVM_SYNC_DIAG318)) {
        env->diag318_info = cs->kvm_run->s.regs.diag318;
    }

    return 0;
}

int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
}

int kvm_s390_get_clock_ext(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;
    struct kvm_s390_vm_tod_clock gtod;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_EXT,
        .addr = (uint64_t)&gtod,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    *tod_high = gtod.epoch_idx;
    *tod_low = gtod.tod;

    return r;
}

int kvm_s390_set_clock(uint8_t tod_high, uint64_t tod_low)
{
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)&tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)&tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

int kvm_s390_set_clock_ext(uint8_t tod_high, uint64_t tod_low)
{
    struct kvm_s390_vm_tod_clock gtod = {
        .epoch_idx = tod_high,
        .tod = tod_low,
    };
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_EXT,
        .addr = (uint64_t)&gtod,
    };

    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}
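/*
 * The "_ext" TOD variants transfer the epoch index and the TOD value in a
 * single KVM_S390_VM_TOD_EXT access, so the clock is read or set
 * atomically; the non-ext fallback needs two separate KVM_S390_VM_TOD
 * accesses and can therefore race with a TOD wrap in between. tod_high is
 * the 8-bit epoch extension introduced for times past the s390 TOD clock
 * rollover in 2042.
 */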
/**
 * kvm_s390_mem_op:
 * @addr: the logical start address in guest memory
 * @ar: the access register number
 * @hostbuf: buffer in host memory. NULL = do only checks w/o copying
 * @len: length that should be transferred
 * @is_write: true = write, false = read
 * Returns: 0 on success, non-zero if an exception or error occurred
 *
 * Use KVM ioctl to read/write from/to guest memory. An access exception
 * is injected into the vCPU in case of translation errors.
 */
int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
                    int len, bool is_write)
{
    struct kvm_s390_mem_op mem_op = {
        .gaddr = addr,
        .flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION,
        .size = len,
        .op = is_write ? KVM_S390_MEMOP_LOGICAL_WRITE
                       : KVM_S390_MEMOP_LOGICAL_READ,
        .buf = (uint64_t)hostbuf,
        .ar = ar,
        .key = (cpu->env.psw.mask & PSW_MASK_KEY) >> PSW_SHIFT_KEY,
    };
    int ret;

    if (!cap_mem_op) {
        return -ENOSYS;
    }
    if (!hostbuf) {
        mem_op.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
    }
    if (mem_op_storage_key_support) {
        mem_op.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
    if (ret < 0) {
        warn_report("KVM_S390_MEM_OP failed: %s", strerror(-ret));
    }
    return ret;
}

int kvm_s390_mem_op_pv(S390CPU *cpu, uint64_t offset, void *hostbuf,
                       int len, bool is_write)
{
    struct kvm_s390_mem_op mem_op = {
        .sida_offset = offset,
        .size = len,
        .op = is_write ? KVM_S390_MEMOP_SIDA_WRITE
                       : KVM_S390_MEMOP_SIDA_READ,
        .buf = (uint64_t)hostbuf,
    };
    int ret;

    if (!cap_mem_op || !cap_protected) {
        return -ENOSYS;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
    if (ret < 0) {
        error_report("KVM_S390_MEM_OP failed: %s", strerror(-ret));
        abort();
    }
    return ret;
}
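/*
 * Hedged usage sketch for kvm_s390_mem_op() (hypothetical caller; buffer
 * and address invented for illustration):
 *
 *     uint8_t buf[256];
 *     // read 256 bytes from guest logical address 0x2000 via AR 0
 *     if (kvm_s390_mem_op(cpu, 0x2000, 0, buf, sizeof(buf), false)) {
 *         // translation failed; KVM already injected the access exception
 *     }
 *
 * Passing hostbuf == NULL turns the call into a pure access check
 * (KVM_S390_MEMOP_F_CHECK_ONLY), still honouring storage keys when the
 * kernel supports KVM_S390_MEMOP_F_SKEY_PROTECTION.
 */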
static uint8_t const *sw_bp_inst;
static uint8_t sw_bp_ilen;

static void determine_sw_breakpoint_instr(void)
{
    /* DIAG 501 is used for sw breakpoints with old kernels */
    static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};
    /* Instruction 0x0000 is used for sw breakpoints with recent kernels */
    static const uint8_t instr_0x0000[] = {0x00, 0x00};

    if (sw_bp_inst) {
        return;
    }
    if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_USER_INSTR0, 0)) {
        sw_bp_inst = diag_501;
        sw_bp_ilen = sizeof(diag_501);
        trace_kvm_sw_breakpoint(4);
    } else {
        sw_bp_inst = instr_0x0000;
        sw_bp_ilen = sizeof(instr_0x0000);
        trace_kvm_sw_breakpoint(2);
    }
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    determine_sw_breakpoint_instr();

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                            sw_bp_ilen, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)sw_bp_inst, sw_bp_ilen, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t t[MAX_ILEN];

    if (cpu_memory_rw_debug(cs, bp->pc, t, sw_bp_ilen, 0)) {
        return -EINVAL;
    } else if (memcmp(t, sw_bp_inst, sw_bp_ilen)) {
        return -EINVAL;
    } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                                   sw_bp_ilen, 1)) {
        return -EINVAL;
    }

    return 0;
}

static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr,
                                                    int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoints; n++) {
        if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type &&
            (hw_breakpoints[n].len == len || len == -1)) {
            return &hw_breakpoints[n];
        }
    }

    return NULL;
}

static int insert_hw_breakpoint(target_ulong addr, int len, int type)
{
    int size;

    if (find_hw_breakpoint(addr, len, type)) {
        return -EEXIST;
    }

    size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint);

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size);
    } else {
        hw_breakpoints =
            (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size);
    }

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        return -ENOMEM;
    }

    hw_breakpoints[nb_hw_breakpoints].addr = addr;
    hw_breakpoints[nb_hw_breakpoints].len = len;
    hw_breakpoints[nb_hw_breakpoints].type = type;

    nb_hw_breakpoints++;

    return 0;
}

int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        type = KVM_HW_BP;
        break;
    case GDB_WATCHPOINT_WRITE:
        if (len < 1) {
            return -EINVAL;
        }
        type = KVM_HW_WP_WRITE;
        break;
    default:
        return -ENOSYS;
    }
    return insert_hw_breakpoint(addr, len, type);
}

int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    int size;
    struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type);

    if (bp == NULL) {
        return -ENOENT;
    }

    nb_hw_breakpoints--;
    if (nb_hw_breakpoints > 0) {
        /*
         * In order to trim the array, move the last element to the position
         * to be removed - if necessary.
         */
        if (bp != &hw_breakpoints[nb_hw_breakpoints]) {
            *bp = hw_breakpoints[nb_hw_breakpoints];
        }
        size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint);
        hw_breakpoints = g_realloc(hw_breakpoints, size);
    } else {
        g_free(hw_breakpoints);
        hw_breakpoints = NULL;
    }

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoints = 0;
    g_free(hw_breakpoints);
    hw_breakpoints = NULL;
}

void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    int i;

    if (nb_hw_breakpoints > 0) {
        dbg->arch.nr_hw_bp = nb_hw_breakpoints;
        dbg->arch.hw_bp = hw_breakpoints;

        for (i = 0; i < nb_hw_breakpoints; ++i) {
            hw_breakpoints[i].phys_addr =
                s390_cpu_get_phys_addr_debug(cpu, hw_breakpoints[i].addr);
        }
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
    } else {
        dbg->arch.nr_hw_bp = 0;
        dbg->arch.hw_bp = NULL;
    }
}
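/*
 * The hw_breakpoints array above doubles as the wire format: on each
 * guest-debug update it is handed to the kernel verbatim through
 * dbg->arch.hw_bp, which is why removal swap-deletes (moves the last
 * element into the hole) instead of keeping a linked list. A simplified
 * sketch of the resulting lifecycle on the gdbstub side:
 *
 *     kvm_arch_insert_hw_breakpoint(addr, len, GDB_BREAKPOINT_HW);
 *     // ... kvm_arch_update_guest_debug() transfers the whole array ...
 *     kvm_arch_remove_hw_breakpoint(addr, len, GDB_BREAKPOINT_HW);
 */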
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}

static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq,
                                     struct kvm_s390_interrupt *interrupt)
{
    int r = 0;

    interrupt->type = irq->type;
    switch (irq->type) {
    case KVM_S390_INT_VIRTIO:
        interrupt->parm = irq->u.ext.ext_params;
        /* fall through */
    case KVM_S390_INT_PFAULT_INIT:
    case KVM_S390_INT_PFAULT_DONE:
        interrupt->parm64 = irq->u.ext.ext_params2;
        break;
    case KVM_S390_PROGRAM_INT:
        interrupt->parm = irq->u.pgm.code;
        break;
    case KVM_S390_SIGP_SET_PREFIX:
        interrupt->parm = irq->u.prefix.address;
        break;
    case KVM_S390_INT_SERVICE:
        interrupt->parm = irq->u.ext.ext_params;
        break;
    case KVM_S390_MCHK:
        interrupt->parm = irq->u.mchk.cr14;
        interrupt->parm64 = irq->u.mchk.mcic;
        break;
    case KVM_S390_INT_EXTERNAL_CALL:
        interrupt->parm = irq->u.extcall.code;
        break;
    case KVM_S390_INT_EMERGENCY:
        interrupt->parm = irq->u.emerg.code;
        break;
    case KVM_S390_SIGP_STOP:
    case KVM_S390_RESTART:
        break; /* These types have no parameters */
    case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
        interrupt->parm = irq->u.io.subchannel_id << 16;
        interrupt->parm |= irq->u.io.subchannel_nr;
        interrupt->parm64 = (uint64_t)irq->u.io.io_int_parm << 32;
        interrupt->parm64 |= irq->u.io.io_int_word;
        break;
    default:
        r = -EINVAL;
        break;
    }
    return r;
}

static void inject_vcpu_irq_legacy(CPUState *cs, struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq)
{
    CPUState *cs = CPU(cpu);
    int r;

    if (cap_s390_irq) {
        r = kvm_vcpu_ioctl(cs, KVM_S390_IRQ, irq);
        if (!r) {
            return;
        }
        error_report("KVM failed to inject interrupt %llx", irq->type);
        exit(1);
    }

    inject_vcpu_irq_legacy(cs, irq);
}

void kvm_s390_floating_interrupt_legacy(struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vm_ioctl(kvm_state, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

void kvm_s390_program_interrupt(S390CPU *cpu, uint16_t code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
    };
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  cpu->env.psw.addr);
    kvm_s390_vcpu_interrupt(cpu, &irq);
}

void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
        .u.pgm.trans_exc_code = te_code,
        .u.pgm.exc_access_id = te_code & 3,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}
static void kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
                                  uint16_t ipbh0)
{
    CPUS390XState *env = &cpu->env;
    uint64_t sccb;
    uint32_t code;
    int r;

    sccb = env->regs[ipbh0 & 0xf];
    code = env->regs[(ipbh0 & 0xf0) >> 4];

    switch (run->s390_sieic.icptcode) {
    case ICPT_PV_INSTR_NOTIFICATION:
        g_assert(s390_is_pv());
        /* The notification intercepts are currently handled by KVM */
        error_report("unexpected SCLP PV notification");
        exit(1);
        break;
    case ICPT_PV_INSTR:
        g_assert(s390_is_pv());
        sclp_service_call_protected(env, sccb, code);
        /* Setting the CC is done by the Ultravisor. */
        break;
    case ICPT_INSTRUCTION:
        g_assert(!s390_is_pv());
        r = sclp_service_call(env, sccb, code);
        if (r < 0) {
            kvm_s390_program_interrupt(cpu, -r);
            return;
        }
        setcc(cpu, r);
    }
}

static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    int rc = 0;
    uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;

    switch (ipa1) {
    case PRIV_B2_XSCH:
        ioinst_handle_xsch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_CSCH:
        ioinst_handle_csch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_HSCH:
        ioinst_handle_hsch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_MSCH:
        ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_SSCH:
        ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_STCRW:
        ioinst_handle_stcrw(cpu, run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_STSCH:
        ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_TSCH:
        /* We should only get tsch via KVM_EXIT_S390_TSCH. */
        fprintf(stderr, "Spurious tsch intercept\n");
        break;
    case PRIV_B2_CHSC:
        ioinst_handle_chsc(cpu, run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_TPI:
        /* This should have been handled by kvm already. */
        fprintf(stderr, "Spurious tpi intercept\n");
        break;
    case PRIV_B2_SCHM:
        ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
                           run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_RSCH:
        ioinst_handle_rsch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_RCHP:
        ioinst_handle_rchp(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_STCPS:
        /* We do not provide this instruction, it is suppressed. */
        break;
    case PRIV_B2_SAL:
        ioinst_handle_sal(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_SIGA:
        /* Not provided, set CC = 3 for subchannel not operational */
        setcc(cpu, 3);
        break;
    case PRIV_B2_SCLP_CALL:
        kvm_sclp_service_call(cpu, run, ipbh0);
        break;
    default:
        rc = -1;
        trace_kvm_insn_unhandled_priv(ipa1);
        break;
    }

    return rc;
}

static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run,
                                  uint8_t *ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t x2 = (run->s390_sieic.ipa & 0x000f);
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }
    if (ar) {
        *ar = base2;
    }

    return (base2 ? env->regs[base2] : 0) +
           (x2 ? env->regs[x2] : 0) + (long)(int)disp2;
}

static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run,
                                  uint8_t *ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }
    if (ar) {
        *ar = base2;
    }

    return (base2 ? env->regs[base2] : 0) + (long)(int)disp2;
}
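/*
 * Worked example for the RXY/RSY displacement decode above: the 20-bit
 * displacement is split into DL (the low 12 bits, extracted from ipb bits
 * via the 0x0fff0000 mask) and DH (the next instruction byte, shifted
 * into place by the "<< 4" on the already byte-aligned 0xff00 field).
 * For DH:DL = 0xff:0xffe the raw value is 0xffffe; since bit 0x80000 is
 * set, adding 0xfff00000 sign-extends it to 0xfffffffe, i.e. -2 after
 * the (long)(int) cast.
 */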
static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return clp_service_call(cpu, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return pcilg_service_call(cpu, r1, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return pcistg_service_call(cpu, r1, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;
    uint8_t ar;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        fiba = get_base_disp_rxy(cpu, run, &ar);

        return stpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint8_t r3 = run->s390_sieic.ipa & 0x000f;
    uint8_t isc;
    uint16_t mode;
    int r;

    mode = env->regs[r1] & 0xffff;
    isc = (env->regs[r3] >> 27) & 0x7;
    r = css_do_sic(env, isc, mode);
    if (r) {
        kvm_s390_program_interrupt(cpu, -r);
    }

    return 0;
}

static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return rpcit_service_call(cpu, r1, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint8_t r3 = run->s390_sieic.ipa & 0x000f;
    uint64_t gaddr;
    uint8_t ar;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        gaddr = get_base_disp_rsy(cpu, run, &ar);

        return pcistb_service_call(cpu, r1, r3, gaddr, ar, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;
    uint8_t ar;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        fiba = get_base_disp_rxy(cpu, run, &ar);

        return mpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED);
    } else {
        return -1;
    }
}
static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    int r = 0;

    switch (ipa1) {
    case PRIV_B9_CLP:
        r = kvm_clp_service_call(cpu, run);
        break;
    case PRIV_B9_PCISTG:
        r = kvm_pcistg_service_call(cpu, run);
        break;
    case PRIV_B9_PCILG:
        r = kvm_pcilg_service_call(cpu, run);
        break;
    case PRIV_B9_RPCIT:
        r = kvm_rpcit_service_call(cpu, run);
        break;
    case PRIV_B9_EQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        trace_kvm_insn_unhandled_priv(ipa1);
        break;
    }

    return r;
}

static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_EB_PCISTB:
        r = kvm_pcistb_service_call(cpu, run);
        break;
    case PRIV_EB_SIC:
        r = kvm_sic_service_call(cpu, run);
        break;
    case PRIV_EB_SQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        trace_kvm_insn_unhandled_priv(ipbl);
        break;
    }

    return r;
}

static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_E3_MPCIFC:
        r = kvm_mpcifc_service_call(cpu, run);
        break;
    case PRIV_E3_STPCIFC:
        r = kvm_stpcifc_service_call(cpu, run);
        break;
    default:
        r = -1;
        trace_kvm_insn_unhandled_priv(ipbl);
        break;
    }

    return r;
}

static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    int ret;

    ret = s390_virtio_hypercall(env);
    if (ret == -EINVAL) {
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
        return 0;
    }

    return ret;
}
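/*
 * DIAG 0x500 is the KVM-specific virtio hypercall on s390: by convention
 * the subfunction code is passed in general register 1 and the result is
 * returned to the guest in general register 2 (see s390_virtio_hypercall()
 * in hw/s390x/s390-virtio-hcall.c). A -EINVAL from the handler means the
 * guest used an unknown subfunction, which is turned into a specification
 * exception above.
 */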
static void kvm_handle_diag_288(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;
    int rc;

    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    rc = handle_diag_288(&cpu->env, r1, r3);
    if (rc) {
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
    }
}

static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;

    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    handle_diag_308(&cpu->env, r1, r3, RA_IGNORED);
}

static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    unsigned long pc;

    pc = env->psw.addr - sw_bp_ilen;
    if (kvm_find_sw_breakpoint(CPU(cpu), pc)) {
        env->psw.addr = pc;
        return EXCP_DEBUG;
    }

    return -ENOENT;
}

void kvm_s390_set_diag318(CPUState *cs, uint64_t diag318_info)
{
    CPUS390XState *env = &S390_CPU(cs)->env;

    /* Feat bit is set only if KVM supports sync for diag318 */
    if (s390_has_feat(S390_FEAT_DIAG_318)) {
        env->diag318_info = diag318_info;
        cs->kvm_run->s.regs.diag318 = diag318_info;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_DIAG318;
        /*
         * diag 318 info is zeroed during a clear reset and
         * diag 308 IPL subcodes.
         */
    }
}

static void handle_diag_318(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t reg = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t diag318_info = run->s.regs.gprs[reg];
    CPUState *t;

    /*
     * DIAG 318 can only be enabled with KVM support. As such, let's
     * ensure a guest cannot execute this instruction erroneously.
     */
    if (!s390_has_feat(S390_FEAT_DIAG_318)) {
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
        return;
    }

    CPU_FOREACH(t) {
        run_on_cpu(t, s390_do_cpu_set_diag318,
                   RUN_ON_CPU_HOST_ULONG(diag318_info));
    }
}

#define DIAG_KVM_CODE_MASK 0x000000000000ffff

static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
{
    int r = 0;
    uint16_t func_code;

    /*
     * For any diagnose call we support, bits 48-63 of the resulting
     * address specify the function code; the remainder is ignored.
     */
    func_code = decode_basedisp_rs(&cpu->env, ipb, NULL) & DIAG_KVM_CODE_MASK;
    switch (func_code) {
    case DIAG_TIMEREVENT:
        kvm_handle_diag_288(cpu, run);
        break;
    case DIAG_IPL:
        kvm_handle_diag_308(cpu, run);
        break;
    case DIAG_SET_CONTROL_PROGRAM_CODES:
        handle_diag_318(cpu, run);
        break;
    case DIAG_KVM_HYPERCALL:
        r = handle_hypercall(cpu, run);
        break;
    case DIAG_KVM_BREAKPOINT:
        r = handle_sw_breakpoint(cpu, run);
        break;
    default:
        trace_kvm_insn_diag(func_code);
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
        break;
    }

    return r;
}
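/*
 * Worked example for the decode above: for "DIAG %r2,%r4,0x500" the
 * base/displacement address computed by decode_basedisp_rs() is 0x500
 * (no base register), so the function code is 0x500. Even with a base
 * register contributing high bits, e.g. a resulting address of
 * 0x12340500, masking with DIAG_KVM_CODE_MASK (bits 48-63) still selects
 * function code 0x500.
 */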
static int kvm_s390_handle_sigp(S390CPU *cpu, uint8_t ipa1, uint32_t ipb)
{
    CPUS390XState *env = &cpu->env;
    const uint8_t r1 = ipa1 >> 4;
    const uint8_t r3 = ipa1 & 0x0f;
    int ret;
    uint8_t order;

    /* get order code */
    order = decode_basedisp_rs(env, ipb, NULL) & SIGP_ORDER_MASK;

    ret = handle_sigp(env, order, r1, r3);
    setcc(cpu, ret);
    return 0;
}

static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
{
    unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00);
    uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff;
    int r = -1;

    trace_kvm_insn(run->s390_sieic.ipa, run->s390_sieic.ipb);
    switch (ipa0) {
    case IPA0_B2:
        r = handle_b2(cpu, run, ipa1);
        break;
    case IPA0_B9:
        r = handle_b9(cpu, run, ipa1);
        break;
    case IPA0_EB:
        r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_E3:
        r = handle_e3(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_DIAG:
        r = handle_diag(cpu, run, run->s390_sieic.ipb);
        break;
    case IPA0_SIGP:
        r = kvm_s390_handle_sigp(cpu, ipa1, run->s390_sieic.ipb);
        break;
    }

    if (r < 0) {
        r = 0;
        kvm_s390_program_interrupt(cpu, PGM_OPERATION);
    }

    return r;
}

static void unmanageable_intercept(S390CPU *cpu, S390CrashReason reason,
                                   int pswoffset)
{
    CPUState *cs = CPU(cpu);

    s390_cpu_halt(cpu);
    cpu->env.crash_reason = reason;
    qemu_system_guest_panicked(cpu_get_crash_info(cs));
}

/* try to detect pgm check loops */
static int handle_oper_loop(S390CPU *cpu, struct kvm_run *run)
{
    CPUState *cs = CPU(cpu);
    PSW oldpsw, newpsw;

    newpsw.mask = ldq_phys(cs->as, cpu->env.psa +
                           offsetof(LowCore, program_new_psw));
    newpsw.addr = ldq_phys(cs->as, cpu->env.psa +
                           offsetof(LowCore, program_new_psw) + 8);
    oldpsw.mask = run->psw_mask;
    oldpsw.addr = run->psw_addr;
    /*
     * Avoid endless loops of operation exceptions if the pgm new PSW would
     * cause a new operation exception. The heuristic checks whether the pgm
     * new psw is within 6 bytes before the faulting psw address (with the
     * same DAT and AS settings), the new psw is not a wait psw, and the
     * fault was not triggered in problem state. In that case, go into the
     * crashed state.
     */

    if (oldpsw.addr - newpsw.addr <= 6 &&
        !(newpsw.mask & PSW_MASK_WAIT) &&
        !(oldpsw.mask & PSW_MASK_PSTATE) &&
        (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
        (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT)) {
        unmanageable_intercept(cpu, S390_CRASH_REASON_OPINT_LOOP,
                               offsetof(LowCore, program_new_psw));
        return EXCP_HALTED;
    }
    return 0;
}
static int handle_intercept(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int icpt_code = run->s390_sieic.icptcode;
    int r = 0;

    trace_kvm_intercept(icpt_code, (long)run->psw_addr);
    switch (icpt_code) {
    case ICPT_INSTRUCTION:
    case ICPT_PV_INSTR:
    case ICPT_PV_INSTR_NOTIFICATION:
        r = handle_instruction(cpu, run);
        break;
    case ICPT_PROGRAM:
        unmanageable_intercept(cpu, S390_CRASH_REASON_PGMINT_LOOP,
                               offsetof(LowCore, program_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_EXT_INT:
        unmanageable_intercept(cpu, S390_CRASH_REASON_EXTINT_LOOP,
                               offsetof(LowCore, external_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_WAITPSW:
        /* disabled wait, since enabled wait is handled in kernel */
        s390_handle_wait(cpu);
        r = EXCP_HALTED;
        break;
    case ICPT_CPU_STOP:
        do_stop_interrupt(&cpu->env);
        r = EXCP_HALTED;
        break;
    case ICPT_OPEREXC:
        /* check for break points */
        r = handle_sw_breakpoint(cpu, run);
        if (r == -ENOENT) {
            /* Then check for potential pgm check loops */
            r = handle_oper_loop(cpu, run);
            if (r == 0) {
                kvm_s390_program_interrupt(cpu, PGM_OPERATION);
            }
        }
        break;
    case ICPT_SOFT_INTERCEPT:
        fprintf(stderr, "KVM unimplemented icpt SOFT\n");
        exit(1);
        break;
    case ICPT_IO:
        fprintf(stderr, "KVM unimplemented icpt IO\n");
        exit(1);
        break;
    default:
        fprintf(stderr, "Unknown intercept code: %d\n", icpt_code);
        exit(1);
        break;
    }

    return r;
}

static int handle_tsch(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int ret;

    ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb,
                             RA_IGNORED);
    if (ret < 0) {
        /*
         * Failure.
         * If an I/O interrupt had been dequeued, we have to reinject it.
         */
        if (run->s390_tsch.dequeued) {
            s390_io_interrupt(run->s390_tsch.subchannel_id,
                              run->s390_tsch.subchannel_nr,
                              run->s390_tsch.io_int_parm,
                              run->s390_tsch.io_int_word);
        }
        ret = 0;
    }
    return ret;
}

static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr, uint8_t ar)
{
    const MachineState *ms = MACHINE(qdev_get_machine());
    uint16_t conf_cpus = 0, reserved_cpus = 0;
    SysIB_322 sysib;
    int del, i;

    if (s390_is_pv()) {
        s390_cpu_pv_mem_read(cpu, 0, &sysib, sizeof(sysib));
    } else if (s390_cpu_virt_mem_read(cpu, addr, ar, &sysib, sizeof(sysib))) {
        return;
    }
    /* Shift the stack of Extended Names to prepare for our own data */
    memmove(&sysib.ext_names[1], &sysib.ext_names[0],
            sizeof(sysib.ext_names[0]) * (sysib.count - 1));
    /*
     * The first virtualization level that doesn't provide Extended Names
     * delimits the stack. It is assumed to be incapable of managing
     * Extended Names for lower levels.
     */
    for (del = 1; del < sysib.count; del++) {
        if (!sysib.vm[del].ext_name_encoding || !sysib.ext_names[del][0]) {
            break;
        }
    }
    if (del < sysib.count) {
        memset(sysib.ext_names[del], 0,
               sizeof(sysib.ext_names[0]) * (sysib.count - del));
    }

    /* count the cpus and split them into configured and reserved ones */
    for (i = 0; i < ms->possible_cpus->len; i++) {
        if (ms->possible_cpus->cpus[i].cpu) {
            conf_cpus++;
        } else {
            reserved_cpus++;
        }
    }
    sysib.vm[0].total_cpus = conf_cpus + reserved_cpus;
    sysib.vm[0].conf_cpus = conf_cpus;
    sysib.vm[0].reserved_cpus = reserved_cpus;

    /* Insert short machine name in EBCDIC, padded with blanks */
    if (qemu_name) {
        memset(sysib.vm[0].name, 0x40, sizeof(sysib.vm[0].name));
        ebcdic_put(sysib.vm[0].name, qemu_name, MIN(sizeof(sysib.vm[0].name),
                                                    strlen(qemu_name)));
    }
    sysib.vm[0].ext_name_encoding = 2; /* 2 = UTF-8 */
    /*
     * If the hypervisor specifies a zero Extended Name in the STSI 3.2.2
     * SYSIB, s390 considers it incapable of providing any Extended Name.
     * Therefore, if no name was specified on the qemu invocation, we go
     * with the same "KVMguest" default that KVM has filled into the short
     * name field.
     */
    strpadcpy((char *)sysib.ext_names[0],
              sizeof(sysib.ext_names[0]),
              qemu_name ?: "KVMguest", '\0');

    /* Insert UUID */
    memcpy(sysib.vm[0].uuid, &qemu_uuid, sizeof(sysib.vm[0].uuid));

    if (s390_is_pv()) {
        s390_cpu_pv_mem_write(cpu, 0, &sysib, sizeof(sysib));
    } else {
        s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, sizeof(sysib));
    }
}
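/*
 * Summary of the STSI 3.2.2 post-handling above: the kernel has already
 * filled in the SYSIB; QEMU then shifts the stack of Extended Names down
 * one level and rewrites level 0 with its own CPU counts, the
 * EBCDIC-padded short name, the UTF-8 extended name (defaulting to
 * "KVMguest") and the instance UUID. For protected guests the SYSIB
 * travels through the SIDA rather than through guest memory, hence the
 * s390_is_pv() special cases.
 */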
static int handle_stsi(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    switch (run->s390_stsi.fc) {
    case 3:
        if (run->s390_stsi.sel1 != 2 || run->s390_stsi.sel2 != 2) {
            return 0;
        }
        /* Only sysib 3.2.2 needs post-handling for now. */
        insert_stsi_3_2_2(cpu, run->s390_stsi.addr, run->s390_stsi.ar);
        return 0;
    default:
        return 0;
    }
}

static int kvm_arch_handle_debug_exit(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    int ret = 0;
    struct kvm_debug_exit_arch *arch_info = &run->debug.arch;

    switch (arch_info->type) {
    case KVM_HW_WP_WRITE:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            cs->watchpoint_hit = &hw_watchpoint;
            hw_watchpoint.vaddr = arch_info->addr;
            hw_watchpoint.flags = BP_MEM_WRITE;
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_HW_BP:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_SINGLESTEP:
        if (cs->singlestep_enabled) {
            ret = EXCP_DEBUG;
        }
        break;
    default:
        ret = -ENOSYS;
    }

    return ret;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    S390CPU *cpu = S390_CPU(cs);
    int ret = 0;

    qemu_mutex_lock_iothread();

    kvm_cpu_synchronize_state(cs);

    switch (run->exit_reason) {
    case KVM_EXIT_S390_SIEIC:
        ret = handle_intercept(cpu);
        break;
    case KVM_EXIT_S390_RESET:
        s390_ipl_reset_request(cs, S390_RESET_REIPL);
        break;
    case KVM_EXIT_S390_TSCH:
        ret = handle_tsch(cpu);
        break;
    case KVM_EXIT_S390_STSI:
        ret = handle_stsi(cpu);
        break;
    case KVM_EXIT_DEBUG:
        ret = kvm_arch_handle_debug_exit(cpu);
        break;
    default:
        fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
        break;
    }
    qemu_mutex_unlock_iothread();

    if (ret == 0) {
        ret = EXCP_INTERRUPT;
    }
    return ret;
}
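/*
 * Return-value convention: the exit handlers above return 0 for "handled,
 * nothing special", which kvm_arch_handle_exit() maps to EXCP_INTERRUPT so
 * the common KVM loop drops back to the main loop before re-entering the
 * guest; positive codes such as EXCP_DEBUG and EXCP_HALTED instead
 * propagate a debugged or stopped vCPU to the common code.
 */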
bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
{
    return true;
}

void kvm_s390_enable_css_support(S390CPU *cpu)
{
    int r;

    /* Activate host kernel channel subsystem support. */
    r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0);
    assert(r == 0);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
    /*
     * Note that while irqchip capabilities generally imply that cpustates
     * are handled in-kernel, it is not true for s390 (yet); therefore, we
     * have to override the common code kvm_halt_in_kernel_allowed setting.
     */
    if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        kvm_gsi_routing_allowed = true;
        kvm_halt_in_kernel_allowed = false;
    }
}

int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
                                    int vq, bool assign)
{
    struct kvm_ioeventfd kick = {
        .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY |
                 KVM_IOEVENTFD_FLAG_DATAMATCH,
        .fd = event_notifier_get_fd(notifier),
        .datamatch = vq,
        .addr = sch,
        .len = 8,
    };
    trace_kvm_assign_subch_ioeventfd(kick.fd, kick.addr, assign,
                                     kick.datamatch);
    if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
}
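/*
 * Minimal usage sketch (hypothetical values, for illustration only):
 * wiring virtqueue 2 of the subchannel with cookie 0x10003 to a notifier,
 * so the guest's virtio-ccw queue notification is handled entirely in the
 * kernel:
 *
 *     kvm_s390_assign_subch_ioeventfd(&notifier, 0x10003, 2, true);
 *     // ... and later, to tear it down again:
 *     kvm_s390_assign_subch_ioeventfd(&notifier, 0x10003, 2, false);
 *
 * The datamatch field carries the virtqueue index, so one subchannel can
 * register a distinct eventfd per queue.
 */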

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    S390PCIBusDevice *pbdev;
    uint32_t vec = data & ZPCI_MSI_VEC_MASK;

    if (!dev) {
        trace_kvm_msi_route_fixup("no pci device");
        return -ENODEV;
    }

    pbdev = s390_pci_find_dev_by_target(s390_get_phb(), DEVICE(dev)->id);
    if (!pbdev) {
        trace_kvm_msi_route_fixup("no zpci device");
        return -ENODEV;
    }

    route->type = KVM_IRQ_ROUTING_S390_ADAPTER;
    route->flags = 0;
    route->u.adapter.summary_addr = pbdev->routes.adapter.summary_addr;
    route->u.adapter.ind_addr = pbdev->routes.adapter.ind_addr;
    route->u.adapter.summary_offset = pbdev->routes.adapter.summary_offset;
    route->u.adapter.ind_offset = pbdev->routes.adapter.ind_offset + vec;
    route->u.adapter.adapter_id = pbdev->routes.adapter.adapter_id;
    return 0;
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}

static int query_cpu_subfunc(S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_subfunc prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE_SUBFUNC,
        .addr = (uint64_t) &prop,
    };
    int rc;

    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        return rc;
    }

    /*
     * Add all subfunctions now, provided the feature that unlocks the
     * corresponding query function is available.
     */
    s390_add_from_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
    if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
    }
    if (test_bit(S390_FEAT_MSA, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
    }
    if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
    }
    if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
    }
    if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
    }
    if (test_bit(S390_FEAT_MSA_EXT_8, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma);
    }
    if (test_bit(S390_FEAT_MSA_EXT_9, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KDSA, prop.kdsa);
    }
    if (test_bit(S390_FEAT_ESORT_BASE, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_SORTL, prop.sortl);
    }
    if (test_bit(S390_FEAT_DEFLATE_BASE, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_DFLTCC, prop.dfltcc);
    }
    return 0;
}
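
/*
 * The query/configure functions above and below all follow the same KVM
 * device-attribute pattern: point kvm_device_attr.addr at a properties
 * buffer and issue KVM_GET_DEVICE_ATTR or KVM_SET_DEVICE_ATTR on the VM
 * fd. A minimal sketch of the read side; the helper name is illustrative
 * only and nothing calls it.
 */
static int G_GNUC_UNUSED example_get_cpu_model_attr(uint64_t attr_id,
                                                    void *buf)
{
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = attr_id,
        .addr = (uint64_t) buf,
    };

    /* returns 0 on success, a negative errno value otherwise */
    return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
}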

static int configure_cpu_subfunc(const S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_subfunc prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR_SUBFUNC,
        .addr = (uint64_t) &prop,
    };

    if (!kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                           KVM_S390_VM_CPU_PROCESSOR_SUBFUNC)) {
        /* hardware support might be missing, IBC will handle most of this */
        return 0;
    }

    s390_fill_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
    if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
    }
    if (test_bit(S390_FEAT_MSA, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
    }
    if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
    }
    if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
        s390_fill_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
    }
    if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
    }
    if (test_bit(S390_FEAT_MSA_EXT_8, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma);
    }
    if (test_bit(S390_FEAT_MSA_EXT_9, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KDSA, prop.kdsa);
    }
    if (test_bit(S390_FEAT_ESORT_BASE, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_SORTL, prop.sortl);
    }
    if (test_bit(S390_FEAT_DEFLATE_BASE, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_DFLTCC, prop.dfltcc);
    }
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

static bool ap_available(void)
{
    return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO,
                             KVM_S390_VM_CRYPTO_ENABLE_APIE);
}

static bool ap_enabled(const S390FeatBitmap features)
{
    return test_bit(S390_FEAT_AP, features);
}

static bool uv_feat_supported(void)
{
    return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST);
}

static int query_uv_feat_guest(S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_uv_feat prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST,
        .addr = (uint64_t) &prop,
    };
    int rc;

    /* AP support check is currently the only user of the UV feature test */
    if (!(uv_feat_supported() && ap_available())) {
        return 0;
    }

    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        return rc;
    }

    if (prop.ap) {
        set_bit(S390_FEAT_UV_FEAT_AP, features);
    }
    if (prop.ap_intr) {
        set_bit(S390_FEAT_UV_FEAT_AP_INTR, features);
    }

    return 0;
}
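
/*
 * Usage sketch for the query above (hypothetical helper, not part of
 * the build): after a successful call, the UV feature bits are ORed
 * into the bitmap and can be tested like any other S390Feat.
 */
static bool G_GNUC_UNUSED example_uv_ap_offered(void)
{
    S390FeatBitmap feats;

    bitmap_zero(feats, S390_FEAT_MAX);
    if (query_uv_feat_guest(feats)) {
        return false;
    }
    return test_bit(S390_FEAT_UV_FEAT_AP, feats);
}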

static int kvm_to_feat[][2] = {
    { KVM_S390_VM_CPU_FEAT_ESOP, S390_FEAT_ESOP },
    { KVM_S390_VM_CPU_FEAT_SIEF2, S390_FEAT_SIE_F2 },
    { KVM_S390_VM_CPU_FEAT_64BSCAO, S390_FEAT_SIE_64BSCAO },
    { KVM_S390_VM_CPU_FEAT_SIIF, S390_FEAT_SIE_SIIF },
    { KVM_S390_VM_CPU_FEAT_GPERE, S390_FEAT_SIE_GPERE },
    { KVM_S390_VM_CPU_FEAT_GSLS, S390_FEAT_SIE_GSLS },
    { KVM_S390_VM_CPU_FEAT_IB, S390_FEAT_SIE_IB },
    { KVM_S390_VM_CPU_FEAT_CEI, S390_FEAT_SIE_CEI },
    { KVM_S390_VM_CPU_FEAT_IBS, S390_FEAT_SIE_IBS },
    { KVM_S390_VM_CPU_FEAT_SKEY, S390_FEAT_SIE_SKEY },
    { KVM_S390_VM_CPU_FEAT_CMMA, S390_FEAT_SIE_CMMA },
    { KVM_S390_VM_CPU_FEAT_PFMFI, S390_FEAT_SIE_PFMFI },
    { KVM_S390_VM_CPU_FEAT_SIGPIF, S390_FEAT_SIE_SIGPIF },
    { KVM_S390_VM_CPU_FEAT_KSS, S390_FEAT_SIE_KSS },
};

static int query_cpu_feat(S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_feat prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE_FEAT,
        .addr = (uint64_t) &prop,
    };
    int rc;
    int i;

    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        return rc;
    }

    for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
        if (test_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat)) {
            set_bit(kvm_to_feat[i][1], features);
        }
    }
    return 0;
}

static int configure_cpu_feat(const S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_feat prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR_FEAT,
        .addr = (uint64_t) &prop,
    };
    int i;

    for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
        if (test_bit(kvm_to_feat[i][1], features)) {
            set_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat);
        }
    }
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

bool kvm_s390_cpu_models_supported(void)
{
    if (!cpu_model_allowed()) {
        /* compatibility machines interfere with the cpu model */
        return false;
    }
    return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_MACHINE) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_PROCESSOR) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_MACHINE_FEAT) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_PROCESSOR_FEAT) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_MACHINE_SUBFUNC);
}
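
/*
 * Sketch of the bit numbering bridged by kvm_to_feat[] above: KVM
 * advertises its feature numbers as big-endian bit positions inside
 * kvm_s390_vm_cpu_feat.feat[], while QEMU keeps S390Feat enums in an
 * ordinary bitmap. This hypothetical helper mirrors the per-entry test
 * done in query_cpu_feat(); nothing calls it.
 */
static bool G_GNUC_UNUSED example_kvm_feat_offered(int kvm_feat_nr,
                                                   const uint8_t *feat_block)
{
    return test_be_bit(kvm_feat_nr, feat_block);
}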

void kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp)
{
    struct kvm_s390_vm_cpu_machine prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE,
        .addr = (uint64_t) &prop,
    };
    uint16_t unblocked_ibc = 0, cpu_type = 0;
    int rc;

    memset(model, 0, sizeof(*model));

    if (!kvm_s390_cpu_models_supported()) {
        error_setg(errp, "KVM doesn't support CPU models");
        return;
    }

    /* query the basic cpu model properties */
    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        error_setg(errp, "KVM: Error querying host CPU model: %d", rc);
        return;
    }

    cpu_type = cpuid_type(prop.cpuid);
    if (has_ibc(prop.ibc)) {
        model->lowest_ibc = lowest_ibc(prop.ibc);
        unblocked_ibc = unblocked_ibc(prop.ibc);
    }
    model->cpu_id = cpuid_id(prop.cpuid);
    model->cpu_id_format = cpuid_format(prop.cpuid);
    model->cpu_ver = 0xff;

    /* get supported cpu features indicated via STFL(E) */
    s390_add_from_feat_block(model->features, S390_FEAT_TYPE_STFL,
                             (uint8_t *) prop.fac_mask);
    /* dat-enhancement facility 2 has no bit but was introduced with stfle */
    if (test_bit(S390_FEAT_STFLE, model->features)) {
        set_bit(S390_FEAT_DAT_ENH_2, model->features);
    }
    /* get supported cpu features indicated e.g. via SCLP */
    rc = query_cpu_feat(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error querying CPU features: %d", rc);
        return;
    }
    /* get supported cpu subfunctions indicated via query / test bit */
    rc = query_cpu_subfunc(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error querying CPU subfunctions: %d", rc);
        return;
    }

    /*
     * PTFF subfunctions might be indicated even though kernel support is
     * missing.
     */
    if (!test_bit(S390_FEAT_MULTIPLE_EPOCH, model->features)) {
        clear_bit(S390_FEAT_PTFF_QSIE, model->features);
        clear_bit(S390_FEAT_PTFF_QTOUE, model->features);
        clear_bit(S390_FEAT_PTFF_STOE, model->features);
        clear_bit(S390_FEAT_PTFF_STOUE, model->features);
    }

    /* with cpu model support, CMM is only indicated if really available */
    if (kvm_s390_cmma_available()) {
        set_bit(S390_FEAT_CMM, model->features);
    } else {
        /* no cmm -> no cmm nt */
        clear_bit(S390_FEAT_CMM_NT, model->features);
    }

    /* bpb needs kernel support for migration, VSIE and reset */
    if (!kvm_check_extension(kvm_state, KVM_CAP_S390_BPB)) {
        clear_bit(S390_FEAT_BPB, model->features);
    }

    /*
     * If we have support for protected virtualization, indicate
     * the protected virtualization IPL unpack facility.
     */
    if (cap_protected) {
        set_bit(S390_FEAT_UNPACK, model->features);
    }

    /* We emulate a zPCI bus and AEN, therefore we don't need HW support */
    set_bit(S390_FEAT_ZPCI, model->features);
    set_bit(S390_FEAT_ADAPTER_EVENT_NOTIFICATION, model->features);

    if (s390_known_cpu_type(cpu_type)) {
        /* we want the exact model, even if some features are missing */
        model->def = s390_find_cpu_def(cpu_type, ibc_gen(unblocked_ibc),
                                       ibc_ec_ga(unblocked_ibc), NULL);
    } else {
        /* model unknown, e.g. too new - search using features */
        model->def = s390_find_cpu_def(0, ibc_gen(unblocked_ibc),
                                       ibc_ec_ga(unblocked_ibc),
                                       model->features);
    }
    if (!model->def) {
        error_setg(errp, "KVM: host CPU model could not be identified");
        return;
    }
    /* for now, we can only provide the AP feature with HW support */
    if (ap_available()) {
        set_bit(S390_FEAT_AP, model->features);
    }

    /*
     * Extended-Length SCCB is handled entirely within QEMU.
     * For PV guests this is completely fenced by the Ultravisor, as Service
     * Call error checking and STFLE interpretation are handled via SIE.
     */
    set_bit(S390_FEAT_EXTENDED_LENGTH_SCCB, model->features);

    if (kvm_check_extension(kvm_state, KVM_CAP_S390_DIAG318)) {
        set_bit(S390_FEAT_DIAG_318, model->features);
    }

    /* Test for Ultravisor features that influence secure guest behavior */
    query_uv_feat_guest(model->features);

    /* strip off features that are not part of the maximum model */
    bitmap_and(model->features, model->features, model->def->full_feat,
               S390_FEAT_MAX);
}
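
/*
 * Hedged usage sketch (hypothetical caller, not part of the build):
 * retrieving the host model with proper Error handling. The real
 * consumers live in the CPU model code under target/s390x/.
 */
static void G_GNUC_UNUSED example_probe_host_model(void)
{
    S390CPUModel model;
    Error *err = NULL;

    kvm_s390_get_host_cpu_model(&model, &err);
    if (err) {
        /* e.g. CPU models unsupported, or a KVM query failed */
        error_report_err(err);
        return;
    }
    /* on success, model.def points at the closest known CPU definition */
}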
2525 */ 2526 set_bit(S390_FEAT_EXTENDED_LENGTH_SCCB, model->features); 2527 2528 if (kvm_check_extension(kvm_state, KVM_CAP_S390_DIAG318)) { 2529 set_bit(S390_FEAT_DIAG_318, model->features); 2530 } 2531 2532 /* Test for Ultravisor features that influence secure guest behavior */ 2533 query_uv_feat_guest(model->features); 2534 2535 /* strip of features that are not part of the maximum model */ 2536 bitmap_and(model->features, model->features, model->def->full_feat, 2537 S390_FEAT_MAX); 2538 } 2539 2540 static int configure_uv_feat_guest(const S390FeatBitmap features) 2541 { 2542 struct kvm_s390_vm_cpu_uv_feat uv_feat = {}; 2543 struct kvm_device_attr attribute = { 2544 .group = KVM_S390_VM_CPU_MODEL, 2545 .attr = KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST, 2546 .addr = (__u64) &uv_feat, 2547 }; 2548 2549 /* AP support check is currently the only user of the UV feature test */ 2550 if (!(uv_feat_supported() && ap_enabled(features))) { 2551 return 0; 2552 } 2553 2554 if (test_bit(S390_FEAT_UV_FEAT_AP, features)) { 2555 uv_feat.ap = 1; 2556 } 2557 if (test_bit(S390_FEAT_UV_FEAT_AP_INTR, features)) { 2558 uv_feat.ap_intr = 1; 2559 } 2560 2561 return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute); 2562 } 2563 2564 static void kvm_s390_configure_apie(bool interpret) 2565 { 2566 uint64_t attr = interpret ? KVM_S390_VM_CRYPTO_ENABLE_APIE : 2567 KVM_S390_VM_CRYPTO_DISABLE_APIE; 2568 2569 if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) { 2570 kvm_s390_set_crypto_attr(attr); 2571 } 2572 } 2573 2574 void kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp) 2575 { 2576 struct kvm_s390_vm_cpu_processor prop = { 2577 .fac_list = { 0 }, 2578 }; 2579 struct kvm_device_attr attr = { 2580 .group = KVM_S390_VM_CPU_MODEL, 2581 .attr = KVM_S390_VM_CPU_PROCESSOR, 2582 .addr = (uint64_t) &prop, 2583 }; 2584 int rc; 2585 2586 if (!model) { 2587 /* compatibility handling if cpu models are disabled */ 2588 if (kvm_s390_cmma_available()) { 2589 kvm_s390_enable_cmma(); 2590 } 2591 return; 2592 } 2593 if (!kvm_s390_cpu_models_supported()) { 2594 error_setg(errp, "KVM doesn't support CPU models"); 2595 return; 2596 } 2597 prop.cpuid = s390_cpuid_from_cpu_model(model); 2598 prop.ibc = s390_ibc_from_cpu_model(model); 2599 /* configure cpu features indicated via STFL(e) */ 2600 s390_fill_feat_block(model->features, S390_FEAT_TYPE_STFL, 2601 (uint8_t *) prop.fac_list); 2602 rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); 2603 if (rc) { 2604 error_setg(errp, "KVM: Error configuring the CPU model: %d", rc); 2605 return; 2606 } 2607 /* configure cpu features indicated e.g. 

void kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp)
{
    struct kvm_s390_vm_cpu_processor prop = {
        .fac_list = { 0 },
    };
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR,
        .addr = (uint64_t) &prop,
    };
    int rc;

    if (!model) {
        /* compatibility handling if cpu models are disabled */
        if (kvm_s390_cmma_available()) {
            kvm_s390_enable_cmma();
        }
        return;
    }
    if (!kvm_s390_cpu_models_supported()) {
        error_setg(errp, "KVM doesn't support CPU models");
        return;
    }
    prop.cpuid = s390_cpuid_from_cpu_model(model);
    prop.ibc = s390_ibc_from_cpu_model(model);
    /* configure cpu features indicated via STFL(E) */
    s390_fill_feat_block(model->features, S390_FEAT_TYPE_STFL,
                         (uint8_t *) prop.fac_list);
    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    if (rc) {
        error_setg(errp, "KVM: Error configuring the CPU model: %d", rc);
        return;
    }
    /* configure cpu features indicated e.g. via SCLP */
    rc = configure_cpu_feat(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error configuring CPU features: %d", rc);
        return;
    }
    /* configure cpu subfunctions indicated via query / test bit */
    rc = configure_cpu_subfunc(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error configuring CPU subfunctions: %d", rc);
        return;
    }
    /* enable CMM via CMMA */
    if (test_bit(S390_FEAT_CMM, model->features)) {
        kvm_s390_enable_cmma();
    }

    if (ap_enabled(model->features)) {
        kvm_s390_configure_apie(true);
    }

    /* configure UV-features for the guest indicated via query / test_bit */
    rc = configure_uv_feat_guest(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error configuring CPU UV features: %d", rc);
        return;
    }
}

void kvm_s390_restart_interrupt(S390CPU *cpu)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_RESTART,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}

void kvm_s390_stop_interrupt(S390CPU *cpu)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_SIGP_STOP,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}

bool kvm_arch_cpu_check_are_resettable(void)
{
    return true;
}

int kvm_s390_get_zpci_op(void)
{
    return cap_zpci_op;
}

void kvm_arch_accel_class_init(ObjectClass *oc)
{
}
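
/*
 * Usage sketch (hypothetical, not called anywhere): the two SIGP helpers
 * above are thin wrappers that feed a struct kvm_s390_irq into
 * kvm_s390_vcpu_interrupt(), so stopping and restarting a vcpu amounts
 * to queueing the matching interrupt types.
 */
static void G_GNUC_UNUSED example_stop_then_restart(S390CPU *cpu)
{
    kvm_s390_stop_interrupt(cpu);    /* queue a SIGP stop */
    kvm_s390_restart_interrupt(cpu); /* followed by a restart */
}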