/*
 * QEMU S390x KVM implementation
 *
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright IBM Corp. 2012
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>
#include <asm/ptrace.h>

#include "cpu.h"
#include "s390x-internal.h"
#include "kvm_s390x.h"
#include "sysemu/kvm_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "qemu/units.h"
#include "qemu/main-loop.h"
#include "qemu/mmap-alloc.h"
#include "qemu/log.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "sysemu/runstate.h"
#include "sysemu/device_tree.h"
#include "exec/gdbstub.h"
#include "exec/ram_addr.h"
#include "trace.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/ipl.h"
#include "hw/s390x/ebcdic.h"
#include "exec/memattrs.h"
#include "hw/s390x/s390-virtio-ccw.h"
#include "hw/s390x/s390-virtio-hcall.h"
#include "target/s390x/kvm/pv.h"

#ifndef DEBUG_KVM
#define DEBUG_KVM 0
#endif

#define DPRINTF(fmt, ...) \
    do { \
        if (DEBUG_KVM) { \
            fprintf(stderr, fmt, ## __VA_ARGS__); \
        } \
    } while (0)

#define kvm_vm_check_mem_attr(s, attr) \
    kvm_vm_check_attr(s, KVM_S390_VM_MEM_CTRL, attr)

#define IPA0_DIAG                       0x8300
#define IPA0_SIGP                       0xae00
#define IPA0_B2                         0xb200
#define IPA0_B9                         0xb900
#define IPA0_EB                         0xeb00
#define IPA0_E3                         0xe300

#define PRIV_B2_SCLP_CALL               0x20
#define PRIV_B2_CSCH                    0x30
#define PRIV_B2_HSCH                    0x31
#define PRIV_B2_MSCH                    0x32
#define PRIV_B2_SSCH                    0x33
#define PRIV_B2_STSCH                   0x34
#define PRIV_B2_TSCH                    0x35
#define PRIV_B2_TPI                     0x36
#define PRIV_B2_SAL                     0x37
#define PRIV_B2_RSCH                    0x38
#define PRIV_B2_STCRW                   0x39
#define PRIV_B2_STCPS                   0x3a
#define PRIV_B2_RCHP                    0x3b
#define PRIV_B2_SCHM                    0x3c
#define PRIV_B2_CHSC                    0x5f
#define PRIV_B2_SIGA                    0x74
#define PRIV_B2_XSCH                    0x76

#define PRIV_EB_SQBS                    0x8a
#define PRIV_EB_PCISTB                  0xd0
#define PRIV_EB_SIC                     0xd1

#define PRIV_B9_EQBS                    0x9c
#define PRIV_B9_CLP                     0xa0
#define PRIV_B9_PCISTG                  0xd0
#define PRIV_B9_PCILG                   0xd2
#define PRIV_B9_RPCIT                   0xd3

#define PRIV_E3_MPCIFC                  0xd0
#define PRIV_E3_STPCIFC                 0xd4

#define DIAG_TIMEREVENT                 0x288
#define DIAG_IPL                        0x308
#define DIAG_SET_CONTROL_PROGRAM_CODES  0x318
#define DIAG_KVM_HYPERCALL              0x500
#define DIAG_KVM_BREAKPOINT             0x501

#define ICPT_INSTRUCTION                0x04
#define ICPT_PROGRAM                    0x08
#define ICPT_EXT_INT                    0x14
#define ICPT_WAITPSW                    0x1c
#define ICPT_SOFT_INTERCEPT             0x24
#define ICPT_CPU_STOP                   0x28
#define ICPT_OPEREXC                    0x2c
#define ICPT_IO                         0x40
#define ICPT_PV_INSTR                   0x68
#define ICPT_PV_INSTR_NOTIFICATION      0x6c

#define NR_LOCAL_IRQS 32
/*
 * Needs to be big enough to contain max_cpus emergency signals
 * and in addition NR_LOCAL_IRQS interrupts
 */
#define VCPU_IRQ_BUF_SIZE(max_cpus) (sizeof(struct kvm_s390_irq) * \
                                     (max_cpus + NR_LOCAL_IRQS))
/*
 * KVM only supports memory slots of up to KVM_MEM_MAX_NR_PAGES pages,
 * as the dirty bitmap must be managed by bitops that take an int as
 * position indicator. This would end at an unaligned address
 * (0x7fffff00000). As future variants might provide larger pages
 * and to make all addresses properly aligned, let us split at 4TB.
 */
#define KVM_SLOT_MAX_BYTES (4UL * TiB)
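/*
 * Example (illustrative, not from the original source): with a 4 TiB
 * slot limit, a guest backed by 10 TiB of RAM ends up registered with
 * KVM as three consecutive memslots of 4 TiB, 4 TiB and 2 TiB. The
 * split itself is performed by the common memslot code once
 * kvm_set_max_memslot_size(KVM_SLOT_MAX_BYTES) has been called from
 * kvm_arch_init() below.
 */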
static CPUWatchpoint hw_watchpoint;
/*
 * We don't use a list because this structure is also used to transmit the
 * hardware breakpoints to the kernel.
 */
static struct kvm_hw_breakpoint *hw_breakpoints;
static int nb_hw_breakpoints;

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static int cap_sync_regs;
static int cap_async_pf;
static int cap_mem_op;
static int cap_mem_op_extension;
static int cap_s390_irq;
static int cap_ri;
static int cap_hpage_1m;
static int cap_vcpu_resets;
static int cap_protected;
static int cap_zpci_op;
static int cap_protected_dump;

static bool mem_op_storage_key_support;

static int active_cmma;

static int kvm_s390_query_mem_limit(uint64_t *memory_limit)
{
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr = (uint64_t) memory_limit,
    };

    return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
}

int kvm_s390_set_mem_limit(uint64_t new_limit, uint64_t *hw_limit)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr = (uint64_t) &new_limit,
    };

    if (!kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_LIMIT_SIZE)) {
        return 0;
    }

    rc = kvm_s390_query_mem_limit(hw_limit);
    if (rc) {
        return rc;
    } else if (*hw_limit < new_limit) {
        return -E2BIG;
    }

    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

int kvm_s390_cmma_active(void)
{
    return active_cmma;
}

static bool kvm_s390_cmma_available(void)
{
    static bool initialized, value;

    if (!initialized) {
        initialized = true;
        value = kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_ENABLE_CMMA) &&
                kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_CLR_CMMA);
    }
    return value;
}

void kvm_s390_cmma_reset(void)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_CLR_CMMA,
    };

    if (!kvm_s390_cmma_active()) {
        return;
    }

    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    trace_kvm_clear_cmma(rc);
}

static void kvm_s390_enable_cmma(void)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
    };

    if (cap_hpage_1m) {
        warn_report("CMM will not be enabled because it is not "
                    "compatible with huge memory backings.");
        return;
    }
    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    active_cmma = !rc;
    trace_kvm_enable_cmma(rc);
}

static void kvm_s390_set_attr(uint64_t attr)
{
    struct kvm_device_attr attribute = {
        .group = KVM_S390_VM_CRYPTO,
        .attr  = attr,
    };

    int ret = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute);

    if (ret) {
        error_report("Failed to set crypto device attribute %lu: %s",
                     attr, strerror(-ret));
    }
}

static void kvm_s390_init_aes_kw(void)
{
    uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_AES_KW;

    if (object_property_get_bool(OBJECT(qdev_get_machine()), "aes-key-wrap",
                                 NULL)) {
        attr = KVM_S390_VM_CRYPTO_ENABLE_AES_KW;
    }

    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
        kvm_s390_set_attr(attr);
    }
}
"dea-key-wrap", 287 NULL)) { 288 attr = KVM_S390_VM_CRYPTO_ENABLE_DEA_KW; 289 } 290 291 if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) { 292 kvm_s390_set_attr(attr); 293 } 294 } 295 296 void kvm_s390_crypto_reset(void) 297 { 298 if (s390_has_feat(S390_FEAT_MSA_EXT_3)) { 299 kvm_s390_init_aes_kw(); 300 kvm_s390_init_dea_kw(); 301 } 302 } 303 304 void kvm_s390_set_max_pagesize(uint64_t pagesize, Error **errp) 305 { 306 if (pagesize == 4 * KiB) { 307 return; 308 } 309 310 if (!hpage_1m_allowed()) { 311 error_setg(errp, "This QEMU machine does not support huge page " 312 "mappings"); 313 return; 314 } 315 316 if (pagesize != 1 * MiB) { 317 error_setg(errp, "Memory backing with 2G pages was specified, " 318 "but KVM does not support this memory backing"); 319 return; 320 } 321 322 if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_HPAGE_1M, 0)) { 323 error_setg(errp, "Memory backing with 1M pages was specified, " 324 "but KVM does not support this memory backing"); 325 return; 326 } 327 328 cap_hpage_1m = 1; 329 } 330 331 int kvm_s390_get_hpage_1m(void) 332 { 333 return cap_hpage_1m; 334 } 335 336 static void ccw_machine_class_foreach(ObjectClass *oc, void *opaque) 337 { 338 MachineClass *mc = MACHINE_CLASS(oc); 339 340 mc->default_cpu_type = S390_CPU_TYPE_NAME("host"); 341 } 342 343 int kvm_arch_init(MachineState *ms, KVMState *s) 344 { 345 object_class_foreach(ccw_machine_class_foreach, TYPE_S390_CCW_MACHINE, 346 false, NULL); 347 348 if (!kvm_check_extension(kvm_state, KVM_CAP_DEVICE_CTRL)) { 349 error_report("KVM is missing capability KVM_CAP_DEVICE_CTRL - " 350 "please use kernel 3.15 or newer"); 351 return -1; 352 } 353 if (!kvm_check_extension(s, KVM_CAP_S390_COW)) { 354 error_report("KVM is missing capability KVM_CAP_S390_COW - " 355 "unsupported environment"); 356 return -1; 357 } 358 359 cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS); 360 cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF); 361 cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP); 362 cap_mem_op_extension = kvm_check_extension(s, KVM_CAP_S390_MEM_OP_EXTENSION); 363 mem_op_storage_key_support = cap_mem_op_extension > 0; 364 cap_s390_irq = kvm_check_extension(s, KVM_CAP_S390_INJECT_IRQ); 365 cap_vcpu_resets = kvm_check_extension(s, KVM_CAP_S390_VCPU_RESETS); 366 cap_protected = kvm_check_extension(s, KVM_CAP_S390_PROTECTED); 367 cap_zpci_op = kvm_check_extension(s, KVM_CAP_S390_ZPCI_OP); 368 cap_protected_dump = kvm_check_extension(s, KVM_CAP_S390_PROTECTED_DUMP); 369 370 kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0); 371 kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0); 372 kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0); 373 if (ri_allowed()) { 374 if (kvm_vm_enable_cap(s, KVM_CAP_S390_RI, 0) == 0) { 375 cap_ri = 1; 376 } 377 } 378 if (cpu_model_allowed()) { 379 kvm_vm_enable_cap(s, KVM_CAP_S390_GS, 0); 380 } 381 382 /* 383 * The migration interface for ais was introduced with kernel 4.13 384 * but the capability itself had been active since 4.12. As migration 385 * support is considered necessary, we only try to enable this for 386 * newer machine types if KVM_CAP_S390_AIS_MIGRATION is available. 
     */
    if (cpu_model_allowed() && kvm_kernel_irqchip_allowed() &&
        kvm_check_extension(s, KVM_CAP_S390_AIS_MIGRATION)) {
        kvm_vm_enable_cap(s, KVM_CAP_S390_AIS, 0);
    }

    kvm_set_max_memslot_size(KVM_SLOT_MAX_BYTES);
    return 0;
}

int kvm_arch_irqchip_create(KVMState *s)
{
    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    unsigned int max_cpus = MACHINE(qdev_get_machine())->smp.max_cpus;
    S390CPU *cpu = S390_CPU(cs);
    kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
    cpu->irqstate = g_malloc0(VCPU_IRQ_BUF_SIZE(max_cpus));
    return 0;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);

    g_free(cpu->irqstate);
    cpu->irqstate = NULL;

    return 0;
}

static void kvm_s390_reset_vcpu(S390CPU *cpu, unsigned long type)
{
    CPUState *cs = CPU(cpu);

    /*
     * The reset call is needed here to reset in-kernel vcpu data that
     * we can't access directly from QEMU (i.e. with older kernels
     * which don't support sync_regs/ONE_REG). Before this ioctl
     * cpu_synchronize_state() is called in common kvm code
     * (kvm-all).
     */
    if (kvm_vcpu_ioctl(cs, type)) {
        error_report("CPU reset failed on CPU %i type %lx",
                     cs->cpu_index, type);
    }
}

void kvm_s390_reset_vcpu_initial(S390CPU *cpu)
{
    kvm_s390_reset_vcpu(cpu, KVM_S390_INITIAL_RESET);
}

void kvm_s390_reset_vcpu_clear(S390CPU *cpu)
{
    if (cap_vcpu_resets) {
        kvm_s390_reset_vcpu(cpu, KVM_S390_CLEAR_RESET);
    } else {
        kvm_s390_reset_vcpu(cpu, KVM_S390_INITIAL_RESET);
    }
}

void kvm_s390_reset_vcpu_normal(S390CPU *cpu)
{
    if (cap_vcpu_resets) {
        kvm_s390_reset_vcpu(cpu, KVM_S390_NORMAL_RESET);
    }
}

static int can_sync_regs(CPUState *cs, int regs)
{
    return cap_sync_regs && (cs->kvm_run->kvm_valid_regs & regs) == regs;
}

int kvm_arch_put_registers(CPUState *cs, int level)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu = {};
    int r;
    int i;

    /* always save the PSW and the GPRS */
    cs->kvm_run->psw_addr = env->psw.addr;
    cs->kvm_run->psw_mask = env->psw.mask;

    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.gprs[i] = env->regs[i];
            cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
        }
    } else {
        for (i = 0; i < 16; i++) {
            regs.gprs[i] = env->regs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
        if (r < 0) {
            return r;
        }
    }

    if (can_sync_regs(cs, KVM_SYNC_VRS)) {
        for (i = 0; i < 32; i++) {
            cs->kvm_run->s.regs.vrs[i][0] = env->vregs[i][0];
            cs->kvm_run->s.regs.vrs[i][1] = env->vregs[i][1];
        }
        cs->kvm_run->s.regs.fpc = env->fpc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_VRS;
    } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.fprs[i] = *get_freg(env, i);
        }
        cs->kvm_run->s.regs.fpc = env->fpc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_FPRS;
    } else {
        /* Floating point */
        for (i = 0; i < 16; i++) {
            fpu.fprs[i] = *get_freg(env, i);
        }
        fpu.fpc = env->fpc;

        r = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
        if (r < 0) {
            return r;
        }
    }
    /* Do we need to save more than that? */
    if (level == KVM_PUT_RUNTIME_STATE) {
        return 0;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        cs->kvm_run->s.regs.cputm = env->cputm;
        cs->kvm_run->s.regs.ckc = env->ckc;
        cs->kvm_run->s.regs.todpr = env->todpr;
        cs->kvm_run->s.regs.gbea = env->gbea;
        cs->kvm_run->s.regs.pp = env->pp;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ARCH0;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
        memcpy(cs->kvm_run->s.regs.riccb, env->riccb, 64);
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_RICCB;
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        cs->kvm_run->s.regs.pft = env->pfault_token;
        cs->kvm_run->s.regs.pfs = env->pfault_select;
        cs->kvm_run->s.regs.pfc = env->pfault_compare;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PFAULT;
    } else if (cap_async_pf) {
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    /* access registers and control registers */
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
            cs->kvm_run->s.regs.crs[i] = env->cregs[i];
        }
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS;
    } else {
        for (i = 0; i < 16; i++) {
            sregs.acrs[i] = env->aregs[i];
            sregs.crs[i] = env->cregs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
    }

    if (can_sync_regs(cs, KVM_SYNC_GSCB)) {
        memcpy(cs->kvm_run->s.regs.gscb, env->gscb, 32);
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GSCB;
    }

    if (can_sync_regs(cs, KVM_SYNC_BPBC)) {
        cs->kvm_run->s.regs.bpbc = env->bpbc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_BPBC;
    }

    if (can_sync_regs(cs, KVM_SYNC_ETOKEN)) {
        cs->kvm_run->s.regs.etoken = env->etoken;
        cs->kvm_run->s.regs.etoken_extension = env->etoken_extension;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ETOKEN;
    }

    if (can_sync_regs(cs, KVM_SYNC_DIAG318)) {
        cs->kvm_run->s.regs.diag318 = env->diag318_info;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_DIAG318;
    }

    /* Finally the prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        cs->kvm_run->s.regs.prefix = env->psa;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
    } else {
        /* prefix is only supported via sync regs */
    }
    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu;
    int i, r;

    /* get the PSW */
    env->psw.addr = cs->kvm_run->psw_addr;
    env->psw.mask = cs->kvm_run->psw_mask;

    /* the GPRS */
    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            env->regs[i] = cs->kvm_run->s.regs.gprs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->regs[i] = regs.gprs[i];
        }
    }

    /* The ACRS and CRS */
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
            env->cregs[i] = cs->kvm_run->s.regs.crs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->aregs[i] = sregs.acrs[i];
            env->cregs[i] = sregs.crs[i];
        }
    }

    /* Floating point and vector registers */
    if (can_sync_regs(cs, KVM_SYNC_VRS)) {
        for (i = 0; i < 32; i++) {
            env->vregs[i][0] = cs->kvm_run->s.regs.vrs[i][0];
            env->vregs[i][1] = cs->kvm_run->s.regs.vrs[i][1];
        }
        env->fpc = cs->kvm_run->s.regs.fpc;
    } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
        for (i = 0; i < 16; i++) {
            *get_freg(env, i) = cs->kvm_run->s.regs.fprs[i];
        }
        env->fpc = cs->kvm_run->s.regs.fpc;
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            *get_freg(env, i) = fpu.fprs[i];
        }
        env->fpc = fpu.fpc;
    }

    /* The prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        env->psa = cs->kvm_run->s.regs.prefix;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        env->cputm = cs->kvm_run->s.regs.cputm;
        env->ckc = cs->kvm_run->s.regs.ckc;
        env->todpr = cs->kvm_run->s.regs.todpr;
        env->gbea = cs->kvm_run->s.regs.gbea;
        env->pp = cs->kvm_run->s.regs.pp;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
        memcpy(env->riccb, cs->kvm_run->s.regs.riccb, 64);
    }

    if (can_sync_regs(cs, KVM_SYNC_GSCB)) {
        memcpy(env->gscb, cs->kvm_run->s.regs.gscb, 32);
    }

    if (can_sync_regs(cs, KVM_SYNC_BPBC)) {
        env->bpbc = cs->kvm_run->s.regs.bpbc;
    }

    if (can_sync_regs(cs, KVM_SYNC_ETOKEN)) {
        env->etoken = cs->kvm_run->s.regs.etoken;
        env->etoken_extension = cs->kvm_run->s.regs.etoken_extension;
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        env->pfault_token = cs->kvm_run->s.regs.pft;
        env->pfault_select = cs->kvm_run->s.regs.pfs;
        env->pfault_compare = cs->kvm_run->s.regs.pfc;
    } else if (cap_async_pf) {
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    if (can_sync_regs(cs, KVM_SYNC_DIAG318)) {
        env->diag318_info = cs->kvm_run->s.regs.diag318;
    }

    return 0;
}

int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
}

int kvm_s390_get_clock_ext(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;
    struct kvm_s390_vm_tod_clock gtod;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_EXT,
        .addr = (uint64_t)&gtod,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    *tod_high = gtod.epoch_idx;
    *tod_low = gtod.tod;

    return r;
}

int kvm_s390_set_clock(uint8_t tod_high, uint64_t tod_low)
{
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)&tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)&tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

int kvm_s390_set_clock_ext(uint8_t tod_high, uint64_t tod_low)
{
    struct kvm_s390_vm_tod_clock gtod = {
        .epoch_idx = tod_high,
        .tod = tod_low,
    };
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_EXT,
        .addr = (uint64_t)&gtod,
    };

    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}
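/*
 * Usage sketch (illustrative, not from the original source): callers
 * that need the 8-bit epoch index, e.g. to keep time correct across the
 * 2042 TOD-clock wraparound, would probe for the extended attribute
 * first and fall back to the plain low-word interface otherwise:
 *
 *     uint8_t high;
 *     uint64_t low;
 *     if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_TOD,
 *                           KVM_S390_VM_TOD_EXT)) {
 *         kvm_s390_get_clock_ext(&high, &low);
 *     } else {
 *         kvm_s390_get_clock(&high, &low);
 *     }
 */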
/**
 * kvm_s390_mem_op:
 * @addr: the logical start address in guest memory
 * @ar: the access register number
 * @hostbuf: buffer in host memory. NULL = do only checks w/o copying
 * @len: length that should be transferred
 * @is_write: true = write, false = read
 * Returns: 0 on success, non-zero if an exception or error occurred
 *
 * Use KVM ioctl to read/write from/to guest memory. An access exception
 * is injected into the vCPU in case of translation errors.
 */
int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
                    int len, bool is_write)
{
    struct kvm_s390_mem_op mem_op = {
        .gaddr = addr,
        .flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION,
        .size = len,
        .op = is_write ? KVM_S390_MEMOP_LOGICAL_WRITE
                       : KVM_S390_MEMOP_LOGICAL_READ,
        .buf = (uint64_t)hostbuf,
        .ar = ar,
        .key = (cpu->env.psw.mask & PSW_MASK_KEY) >> PSW_SHIFT_KEY,
    };
    int ret;

    if (!cap_mem_op) {
        return -ENOSYS;
    }
    if (!hostbuf) {
        mem_op.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
    }
    if (mem_op_storage_key_support) {
        mem_op.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
    if (ret < 0) {
        warn_report("KVM_S390_MEM_OP failed: %s", strerror(-ret));
    }
    return ret;
}

int kvm_s390_mem_op_pv(S390CPU *cpu, uint64_t offset, void *hostbuf,
                       int len, bool is_write)
{
    struct kvm_s390_mem_op mem_op = {
        .sida_offset = offset,
        .size = len,
        .op = is_write ? KVM_S390_MEMOP_SIDA_WRITE
                       : KVM_S390_MEMOP_SIDA_READ,
        .buf = (uint64_t)hostbuf,
    };
    int ret;

    if (!cap_mem_op || !cap_protected) {
        return -ENOSYS;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
    if (ret < 0) {
        error_report("KVM_S390_MEM_OP failed: %s", strerror(-ret));
        abort();
    }
    return ret;
}

static uint8_t const *sw_bp_inst;
static uint8_t sw_bp_ilen;

static void determine_sw_breakpoint_instr(void)
{
    /* DIAG 501 is used for sw breakpoints with old kernels */
    static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};
    /* Instruction 0x0000 is used for sw breakpoints with recent kernels */
    static const uint8_t instr_0x0000[] = {0x00, 0x00};

    if (sw_bp_inst) {
        return;
    }
    if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_USER_INSTR0, 0)) {
        sw_bp_inst = diag_501;
        sw_bp_ilen = sizeof(diag_501);
        DPRINTF("KVM: will use 4-byte sw breakpoints.\n");
    } else {
        sw_bp_inst = instr_0x0000;
        sw_bp_ilen = sizeof(instr_0x0000);
        DPRINTF("KVM: will use 2-byte sw breakpoints.\n");
    }
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    determine_sw_breakpoint_instr();

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                            sw_bp_ilen, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)sw_bp_inst, sw_bp_ilen, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t t[MAX_ILEN];

    if (cpu_memory_rw_debug(cs, bp->pc, t, sw_bp_ilen, 0)) {
        return -EINVAL;
    } else if (memcmp(t, sw_bp_inst, sw_bp_ilen)) {
        return -EINVAL;
    } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                                   sw_bp_ilen, 1)) {
        return -EINVAL;
    }

    return 0;
}
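/*
 * Example flow (illustrative only): when gdb sets a breakpoint, the
 * insert path above saves the original bytes into bp->saved_insn and
 * writes sw_bp_inst over them. Removal first checks that the breakpoint
 * instruction is still in place before restoring bp->saved_insn, so a
 * guest that rewrote its own text in the meantime is detected and the
 * removal fails with -EINVAL instead of corrupting guest memory.
 */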
static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr,
                                                    int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoints; n++) {
        if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type &&
            (hw_breakpoints[n].len == len || len == -1)) {
            return &hw_breakpoints[n];
        }
    }

    return NULL;
}

static int insert_hw_breakpoint(target_ulong addr, int len, int type)
{
    int size;

    if (find_hw_breakpoint(addr, len, type)) {
        return -EEXIST;
    }

    size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint);

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size);
    } else {
        hw_breakpoints =
            (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size);
    }

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        return -ENOMEM;
    }

    hw_breakpoints[nb_hw_breakpoints].addr = addr;
    hw_breakpoints[nb_hw_breakpoints].len = len;
    hw_breakpoints[nb_hw_breakpoints].type = type;

    nb_hw_breakpoints++;

    return 0;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        type = KVM_HW_BP;
        break;
    case GDB_WATCHPOINT_WRITE:
        if (len < 1) {
            return -EINVAL;
        }
        type = KVM_HW_WP_WRITE;
        break;
    default:
        return -ENOSYS;
    }
    return insert_hw_breakpoint(addr, len, type);
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int size;
    struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type);

    if (bp == NULL) {
        return -ENOENT;
    }

    nb_hw_breakpoints--;
    if (nb_hw_breakpoints > 0) {
        /*
         * In order to trim the array, move the last element to the position to
         * be removed - if necessary.
         */
        if (bp != &hw_breakpoints[nb_hw_breakpoints]) {
            *bp = hw_breakpoints[nb_hw_breakpoints];
        }
        size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint);
        hw_breakpoints = g_realloc(hw_breakpoints, size);
    } else {
        g_free(hw_breakpoints);
        hw_breakpoints = NULL;
    }

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoints = 0;
    g_free(hw_breakpoints);
    hw_breakpoints = NULL;
}

void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    int i;

    if (nb_hw_breakpoints > 0) {
        dbg->arch.nr_hw_bp = nb_hw_breakpoints;
        dbg->arch.hw_bp = hw_breakpoints;

        for (i = 0; i < nb_hw_breakpoints; ++i) {
            hw_breakpoints[i].phys_addr = s390_cpu_get_phys_addr_debug(cpu,
                                                       hw_breakpoints[i].addr);
        }
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
    } else {
        dbg->arch.nr_hw_bp = 0;
        dbg->arch.hw_bp = NULL;
    }
}

void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}
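/*
 * Note on the conversion below (illustrative, not from the original
 * source): the legacy struct kvm_s390_interrupt packs its payload into
 * two scalar fields. For an I/O interrupt, e.g., subchannel id 0x0001
 * and subchannel nr 0x2345 end up as parm = 0x00012345, while the
 * interruption parameter and interruption word share parm64. Interrupt
 * types whose payload does not fit this encoding need the newer
 * KVM_S390_IRQ ioctl used by kvm_s390_vcpu_interrupt().
 */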
static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq,
                                     struct kvm_s390_interrupt *interrupt)
{
    int r = 0;

    interrupt->type = irq->type;
    switch (irq->type) {
    case KVM_S390_INT_VIRTIO:
        interrupt->parm = irq->u.ext.ext_params;
        /* fall through */
    case KVM_S390_INT_PFAULT_INIT:
    case KVM_S390_INT_PFAULT_DONE:
        interrupt->parm64 = irq->u.ext.ext_params2;
        break;
    case KVM_S390_PROGRAM_INT:
        interrupt->parm = irq->u.pgm.code;
        break;
    case KVM_S390_SIGP_SET_PREFIX:
        interrupt->parm = irq->u.prefix.address;
        break;
    case KVM_S390_INT_SERVICE:
        interrupt->parm = irq->u.ext.ext_params;
        break;
    case KVM_S390_MCHK:
        interrupt->parm = irq->u.mchk.cr14;
        interrupt->parm64 = irq->u.mchk.mcic;
        break;
    case KVM_S390_INT_EXTERNAL_CALL:
        interrupt->parm = irq->u.extcall.code;
        break;
    case KVM_S390_INT_EMERGENCY:
        interrupt->parm = irq->u.emerg.code;
        break;
    case KVM_S390_SIGP_STOP:
    case KVM_S390_RESTART:
        break; /* These types have no parameters */
    case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
        interrupt->parm = irq->u.io.subchannel_id << 16;
        interrupt->parm |= irq->u.io.subchannel_nr;
        interrupt->parm64 = (uint64_t)irq->u.io.io_int_parm << 32;
        interrupt->parm64 |= irq->u.io.io_int_word;
        break;
    default:
        r = -EINVAL;
        break;
    }
    return r;
}

static void inject_vcpu_irq_legacy(CPUState *cs, struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq)
{
    CPUState *cs = CPU(cpu);
    int r;

    if (cap_s390_irq) {
        r = kvm_vcpu_ioctl(cs, KVM_S390_IRQ, irq);
        if (!r) {
            return;
        }
        error_report("KVM failed to inject interrupt %llx", irq->type);
        exit(1);
    }

    inject_vcpu_irq_legacy(cs, irq);
}

void kvm_s390_floating_interrupt_legacy(struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vm_ioctl(kvm_state, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

void kvm_s390_program_interrupt(S390CPU *cpu, uint16_t code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
    };
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  cpu->env.psw.addr);
    kvm_s390_vcpu_interrupt(cpu, &irq);
}

void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
        .u.pgm.trans_exc_code = te_code,
        .u.pgm.exc_access_id = te_code & 3,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}

static void kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
                                  uint16_t ipbh0)
{
    CPUS390XState *env = &cpu->env;
    uint64_t sccb;
    uint32_t code;
    int r;

    sccb = env->regs[ipbh0 & 0xf];
    code = env->regs[(ipbh0 & 0xf0) >> 4];

    switch (run->s390_sieic.icptcode) {
    case ICPT_PV_INSTR_NOTIFICATION:
        g_assert(s390_is_pv());
        /* The notification intercepts are currently handled by KVM */
        error_report("unexpected SCLP PV notification");
        exit(1);
        break;
    case ICPT_PV_INSTR:
        g_assert(s390_is_pv());
        sclp_service_call_protected(env, sccb, code);
        /* Setting the CC is done by the Ultravisor. */
        break;
    case ICPT_INSTRUCTION:
        g_assert(!s390_is_pv());
        r = sclp_service_call(env, sccb, code);
        if (r < 0) {
            kvm_s390_program_interrupt(cpu, -r);
            return;
        }
        setcc(cpu, r);
    }
}

static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    int rc = 0;
    uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;

    switch (ipa1) {
    case PRIV_B2_XSCH:
        ioinst_handle_xsch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_CSCH:
        ioinst_handle_csch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_HSCH:
        ioinst_handle_hsch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_MSCH:
        ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_SSCH:
        ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_STCRW:
        ioinst_handle_stcrw(cpu, run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_STSCH:
        ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_TSCH:
        /* We should only get tsch via KVM_EXIT_S390_TSCH. */
        fprintf(stderr, "Spurious tsch intercept\n");
        break;
    case PRIV_B2_CHSC:
        ioinst_handle_chsc(cpu, run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_TPI:
        /* This should have been handled by kvm already. */
        fprintf(stderr, "Spurious tpi intercept\n");
        break;
    case PRIV_B2_SCHM:
        ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
                           run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_RSCH:
        ioinst_handle_rsch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_RCHP:
        ioinst_handle_rchp(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_STCPS:
        /* We do not provide this instruction, it is suppressed. */
        break;
    case PRIV_B2_SAL:
        ioinst_handle_sal(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_SIGA:
        /* Not provided, set CC = 3 for subchannel not operational */
        setcc(cpu, 3);
        break;
    case PRIV_B2_SCLP_CALL:
        kvm_sclp_service_call(cpu, run, ipbh0);
        break;
    default:
        rc = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb2%x\n", ipa1);
        break;
    }

    return rc;
}
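/*
 * Worked example for the RXY/RSY decoding below (illustrative): the
 * 20-bit displacement is assembled from DL (ipb bits extracted by the
 * 0x0fff0000 mask) plus DH (the 0xff00 byte shifted into bits 12-19).
 * A DL of 0xffe with DH 0xff gives disp2 = 0xffffe; since bit 0x80000
 * is set, adding 0xfff00000 sign-extends it to 0xfffffffe, and the
 * (long)(int) cast turns that into the final addend -2.
 */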
static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run,
                                  uint8_t *ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t x2 = (run->s390_sieic.ipa & 0x000f);
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }
    if (ar) {
        *ar = base2;
    }

    return (base2 ? env->regs[base2] : 0) +
           (x2 ? env->regs[x2] : 0) + (long)(int)disp2;
}

static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run,
                                  uint8_t *ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }
    if (ar) {
        *ar = base2;
    }

    return (base2 ? env->regs[base2] : 0) + (long)(int)disp2;
}

static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return clp_service_call(cpu, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return pcilg_service_call(cpu, r1, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return pcistg_service_call(cpu, r1, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;
    uint8_t ar;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        fiba = get_base_disp_rxy(cpu, run, &ar);

        return stpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint8_t r3 = run->s390_sieic.ipa & 0x000f;
    uint8_t isc;
    uint16_t mode;
    int r;

    mode = env->regs[r1] & 0xffff;
    isc = (env->regs[r3] >> 27) & 0x7;
    r = css_do_sic(env, isc, mode);
    if (r) {
        kvm_s390_program_interrupt(cpu, -r);
    }

    return 0;
}

static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return rpcit_service_call(cpu, r1, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint8_t r3 = run->s390_sieic.ipa & 0x000f;
    uint64_t gaddr;
    uint8_t ar;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        gaddr = get_base_disp_rsy(cpu, run, &ar);

        return pcistb_service_call(cpu, r1, r3, gaddr, ar, RA_IGNORED);
    } else {
        return -1;
    }
}
static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;
    uint8_t ar;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        fiba = get_base_disp_rxy(cpu, run, &ar);

        return mpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED);
    } else {
        return -1;
    }
}

static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    int r = 0;

    switch (ipa1) {
    case PRIV_B9_CLP:
        r = kvm_clp_service_call(cpu, run);
        break;
    case PRIV_B9_PCISTG:
        r = kvm_pcistg_service_call(cpu, run);
        break;
    case PRIV_B9_PCILG:
        r = kvm_pcilg_service_call(cpu, run);
        break;
    case PRIV_B9_RPCIT:
        r = kvm_rpcit_service_call(cpu, run);
        break;
    case PRIV_B9_EQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb9%x\n", ipa1);
        break;
    }

    return r;
}

static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_EB_PCISTB:
        r = kvm_pcistb_service_call(cpu, run);
        break;
    case PRIV_EB_SIC:
        r = kvm_sic_service_call(cpu, run);
        break;
    case PRIV_EB_SQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xeb%x\n", ipbl);
        break;
    }

    return r;
}

static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_E3_MPCIFC:
        r = kvm_mpcifc_service_call(cpu, run);
        break;
    case PRIV_E3_STPCIFC:
        r = kvm_stpcifc_service_call(cpu, run);
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xe3%x\n", ipbl);
        break;
    }

    return r;
}

static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    int ret;

    ret = s390_virtio_hypercall(env);
    if (ret == -EINVAL) {
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
        return 0;
    }

    return ret;
}

static void kvm_handle_diag_288(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;
    int rc;

    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    rc = handle_diag_288(&cpu->env, r1, r3);
    if (rc) {
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
    }
}

static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;

    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    handle_diag_308(&cpu->env, r1, r3, RA_IGNORED);
}

static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    unsigned long pc;

    pc = env->psw.addr - sw_bp_ilen;
    if (kvm_find_sw_breakpoint(CPU(cpu), pc)) {
        env->psw.addr = pc;
        return EXCP_DEBUG;
    }

    return -ENOENT;
}

void kvm_s390_set_diag318(CPUState *cs, uint64_t diag318_info)
{
    CPUS390XState *env = &S390_CPU(cs)->env;

    /* Feat bit is set only if KVM supports sync for diag318 */
    if (s390_has_feat(S390_FEAT_DIAG_318)) {
        env->diag318_info = diag318_info;
        cs->kvm_run->s.regs.diag318 = diag318_info;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_DIAG318;
        /*
         * diag 318 info is zeroed during a clear reset and
         * diag 308 IPL subcodes.
         */
    }
}

static void handle_diag_318(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t reg = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t diag318_info = run->s.regs.gprs[reg];
    CPUState *t;

    /*
     * DIAG 318 can only be enabled with KVM support. As such, let's
     * ensure a guest cannot execute this instruction erroneously.
     */
    if (!s390_has_feat(S390_FEAT_DIAG_318)) {
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
        return;
    }

    CPU_FOREACH(t) {
        run_on_cpu(t, s390_do_cpu_set_diag318,
                   RUN_ON_CPU_HOST_ULONG(diag318_info));
    }
}

#define DIAG_KVM_CODE_MASK 0x000000000000ffff

static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
{
    int r = 0;
    uint16_t func_code;

    /*
     * For any diagnose call we support, bits 48-63 of the resulting
     * address specify the function code; the remainder is ignored.
     */
    func_code = decode_basedisp_rs(&cpu->env, ipb, NULL) & DIAG_KVM_CODE_MASK;
    switch (func_code) {
    case DIAG_TIMEREVENT:
        kvm_handle_diag_288(cpu, run);
        break;
    case DIAG_IPL:
        kvm_handle_diag_308(cpu, run);
        break;
    case DIAG_SET_CONTROL_PROGRAM_CODES:
        handle_diag_318(cpu, run);
        break;
    case DIAG_KVM_HYPERCALL:
        r = handle_hypercall(cpu, run);
        break;
    case DIAG_KVM_BREAKPOINT:
        r = handle_sw_breakpoint(cpu, run);
        break;
    default:
        DPRINTF("KVM: unknown DIAG: 0x%x\n", func_code);
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
        break;
    }

    return r;
}

static int kvm_s390_handle_sigp(S390CPU *cpu, uint8_t ipa1, uint32_t ipb)
{
    CPUS390XState *env = &cpu->env;
    const uint8_t r1 = ipa1 >> 4;
    const uint8_t r3 = ipa1 & 0x0f;
    int ret;
    uint8_t order;

    /* get order code */
    order = decode_basedisp_rs(env, ipb, NULL) & SIGP_ORDER_MASK;

    ret = handle_sigp(env, order, r1, r3);
    setcc(cpu, ret);
    return 0;
}

static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
{
    unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00);
    uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff;
    int r = -1;

    DPRINTF("handle_instruction 0x%x 0x%x\n",
            run->s390_sieic.ipa, run->s390_sieic.ipb);
    switch (ipa0) {
    case IPA0_B2:
        r = handle_b2(cpu, run, ipa1);
        break;
    case IPA0_B9:
        r = handle_b9(cpu, run, ipa1);
        break;
    case IPA0_EB:
        r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_E3:
        r = handle_e3(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_DIAG:
        r = handle_diag(cpu, run, run->s390_sieic.ipb);
        break;
    case IPA0_SIGP:
        r = kvm_s390_handle_sigp(cpu, ipa1, run->s390_sieic.ipb);
        break;
    }

    if (r < 0) {
        r = 0;
        kvm_s390_program_interrupt(cpu, PGM_OPERATION);
    }

    return r;
}

static void unmanageable_intercept(S390CPU *cpu, S390CrashReason reason,
                                   int pswoffset)
{
    CPUState *cs = CPU(cpu);

    s390_cpu_halt(cpu);
    cpu->env.crash_reason = reason;
    qemu_system_guest_panicked(cpu_get_crash_info(cs));
}
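/*
 * Example for the heuristic below (illustrative): if an operation
 * exception was raised at PSW address 0x1000 and the program new PSW
 * points at 0xffa (6 bytes before it, same DAT/AS settings, not a wait
 * PSW, not problem state), re-executing would fault again immediately,
 * so the guest is reported as crashed instead of spinning forever.
 * Note that oldpsw.addr - newpsw.addr is an unsigned difference, so a
 * new PSW *after* the old address wraps around and fails the <= 6 test.
 */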
/* try to detect pgm check loops */
static int handle_oper_loop(S390CPU *cpu, struct kvm_run *run)
{
    CPUState *cs = CPU(cpu);
    PSW oldpsw, newpsw;

    newpsw.mask = ldq_phys(cs->as, cpu->env.psa +
                           offsetof(LowCore, program_new_psw));
    newpsw.addr = ldq_phys(cs->as, cpu->env.psa +
                           offsetof(LowCore, program_new_psw) + 8);
    oldpsw.mask = run->psw_mask;
    oldpsw.addr = run->psw_addr;
    /*
     * Avoid endless loops of operation exceptions, if the pgm new
     * PSW will cause a new operation exception.
     * The heuristic checks if the pgm new psw is within 6 bytes before
     * the faulting psw address (with same DAT, AS settings) and the
     * new psw is not a wait psw and the fault was not triggered by
     * problem state. In that case go into crashed state.
     */
    if (oldpsw.addr - newpsw.addr <= 6 &&
        !(newpsw.mask & PSW_MASK_WAIT) &&
        !(oldpsw.mask & PSW_MASK_PSTATE) &&
        (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
        (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT)) {
        unmanageable_intercept(cpu, S390_CRASH_REASON_OPINT_LOOP,
                               offsetof(LowCore, program_new_psw));
        return EXCP_HALTED;
    }
    return 0;
}

static int handle_intercept(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int icpt_code = run->s390_sieic.icptcode;
    int r = 0;

    DPRINTF("intercept: 0x%x (at 0x%lx)\n", icpt_code, (long)run->psw_addr);
    switch (icpt_code) {
    case ICPT_INSTRUCTION:
    case ICPT_PV_INSTR:
    case ICPT_PV_INSTR_NOTIFICATION:
        r = handle_instruction(cpu, run);
        break;
    case ICPT_PROGRAM:
        unmanageable_intercept(cpu, S390_CRASH_REASON_PGMINT_LOOP,
                               offsetof(LowCore, program_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_EXT_INT:
        unmanageable_intercept(cpu, S390_CRASH_REASON_EXTINT_LOOP,
                               offsetof(LowCore, external_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_WAITPSW:
        /* disabled wait, since enabled wait is handled in kernel */
        s390_handle_wait(cpu);
        r = EXCP_HALTED;
        break;
    case ICPT_CPU_STOP:
        do_stop_interrupt(&cpu->env);
        r = EXCP_HALTED;
        break;
    case ICPT_OPEREXC:
        /* check for break points */
        r = handle_sw_breakpoint(cpu, run);
        if (r == -ENOENT) {
            /* Then check for potential pgm check loops */
            r = handle_oper_loop(cpu, run);
            if (r == 0) {
                kvm_s390_program_interrupt(cpu, PGM_OPERATION);
            }
        }
        break;
    case ICPT_SOFT_INTERCEPT:
        fprintf(stderr, "KVM unimplemented icpt SOFT\n");
        exit(1);
        break;
    case ICPT_IO:
        fprintf(stderr, "KVM unimplemented icpt IO\n");
        exit(1);
        break;
    default:
        fprintf(stderr, "Unknown intercept code: %d\n", icpt_code);
        exit(1);
        break;
    }

    return r;
}

static int handle_tsch(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int ret;

    ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb,
                             RA_IGNORED);
    if (ret < 0) {
        /*
         * Failure.
         * If an I/O interrupt had been dequeued, we have to reinject it.
         */
        if (run->s390_tsch.dequeued) {
            s390_io_interrupt(run->s390_tsch.subchannel_id,
                              run->s390_tsch.subchannel_nr,
                              run->s390_tsch.io_int_parm,
                              run->s390_tsch.io_int_word);
        }
        ret = 0;
    }
    return ret;
}

static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr, uint8_t ar)
{
    const MachineState *ms = MACHINE(qdev_get_machine());
    uint16_t conf_cpus = 0, reserved_cpus = 0;
    SysIB_322 sysib;
    int del, i;

    if (s390_is_pv()) {
        s390_cpu_pv_mem_read(cpu, 0, &sysib, sizeof(sysib));
    } else if (s390_cpu_virt_mem_read(cpu, addr, ar, &sysib, sizeof(sysib))) {
        return;
    }
    /* Shift the stack of Extended Names to prepare for our own data */
    memmove(&sysib.ext_names[1], &sysib.ext_names[0],
            sizeof(sysib.ext_names[0]) * (sysib.count - 1));
    /*
     * The first virtualization level that does not provide Extended Names
     * delimits the stack; it is assumed to be incapable of managing
     * Extended Names for the levels below it.
     */
    for (del = 1; del < sysib.count; del++) {
        if (!sysib.vm[del].ext_name_encoding || !sysib.ext_names[del][0]) {
            break;
        }
    }
    if (del < sysib.count) {
        memset(sysib.ext_names[del], 0,
               sizeof(sysib.ext_names[0]) * (sysib.count - del));
    }

    /* count the cpus and split them into configured and reserved ones */
    for (i = 0; i < ms->possible_cpus->len; i++) {
        if (ms->possible_cpus->cpus[i].cpu) {
            conf_cpus++;
        } else {
            reserved_cpus++;
        }
    }
    sysib.vm[0].total_cpus = conf_cpus + reserved_cpus;
    sysib.vm[0].conf_cpus = conf_cpus;
    sysib.vm[0].reserved_cpus = reserved_cpus;

    /* Insert short machine name in EBCDIC, padded with blanks */
    if (qemu_name) {
        memset(sysib.vm[0].name, 0x40, sizeof(sysib.vm[0].name));
        ebcdic_put(sysib.vm[0].name, qemu_name, MIN(sizeof(sysib.vm[0].name),
                                                    strlen(qemu_name)));
    }
    sysib.vm[0].ext_name_encoding = 2; /* 2 = UTF-8 */
    /*
     * If the hypervisor specifies a zero Extended Name in the STSI 3.2.2
     * SYSIB, s390 considers it incapable of providing any Extended Name.
     * Therefore, if no name was specified on the qemu invocation, we go
     * with the same "KVMguest" default that KVM has filled into the short
     * name field.
     */
    strpadcpy((char *)sysib.ext_names[0],
              sizeof(sysib.ext_names[0]),
              qemu_name ?: "KVMguest", '\0');

    /* Insert UUID */
    memcpy(sysib.vm[0].uuid, &qemu_uuid, sizeof(sysib.vm[0].uuid));

    if (s390_is_pv()) {
        s390_cpu_pv_mem_write(cpu, 0, &sysib, sizeof(sysib));
    } else {
        s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, sizeof(sysib));
    }
}
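/*
 * Note (summary, not from the original source): STSI is normally
 * emulated entirely in the kernel. Because kvm_arch_init() enabled
 * KVM_CAP_S390_USER_STSI, KVM can forward STSI 3.2.2 to user space via
 * KVM_EXIT_S390_STSI, which gives QEMU the chance to patch VM-level
 * data such as names, CPU counts and the UUID into the block the
 * kernel already wrote.
 */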
static int handle_stsi(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    switch (run->s390_stsi.fc) {
    case 3:
        if (run->s390_stsi.sel1 != 2 || run->s390_stsi.sel2 != 2) {
            return 0;
        }
        /* Only sysib 3.2.2 needs post-handling for now. */
        insert_stsi_3_2_2(cpu, run->s390_stsi.addr, run->s390_stsi.ar);
        return 0;
    default:
        return 0;
    }
}

static int kvm_arch_handle_debug_exit(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    int ret = 0;
    struct kvm_debug_exit_arch *arch_info = &run->debug.arch;

    switch (arch_info->type) {
    case KVM_HW_WP_WRITE:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            cs->watchpoint_hit = &hw_watchpoint;
            hw_watchpoint.vaddr = arch_info->addr;
            hw_watchpoint.flags = BP_MEM_WRITE;
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_HW_BP:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_SINGLESTEP:
        if (cs->singlestep_enabled) {
            ret = EXCP_DEBUG;
        }
        break;
    default:
        ret = -ENOSYS;
    }

    return ret;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    S390CPU *cpu = S390_CPU(cs);
    int ret = 0;

    qemu_mutex_lock_iothread();

    kvm_cpu_synchronize_state(cs);

    switch (run->exit_reason) {
    case KVM_EXIT_S390_SIEIC:
        ret = handle_intercept(cpu);
        break;
    case KVM_EXIT_S390_RESET:
        s390_ipl_reset_request(cs, S390_RESET_REIPL);
        break;
    case KVM_EXIT_S390_TSCH:
        ret = handle_tsch(cpu);
        break;
    case KVM_EXIT_S390_STSI:
        ret = handle_stsi(cpu);
        break;
    case KVM_EXIT_DEBUG:
        ret = kvm_arch_handle_debug_exit(cpu);
        break;
    default:
        fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
        break;
    }
    qemu_mutex_unlock_iothread();

    if (ret == 0) {
        ret = EXCP_INTERRUPT;
    }
    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
{
    return true;
}

void kvm_s390_enable_css_support(S390CPU *cpu)
{
    int r;

    /* Activate host kernel channel subsystem support. */
    r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0);
    assert(r == 0);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
    /*
     * Note that while irqchip capabilities generally imply that cpustates
     * are handled in-kernel, it is not true for s390 (yet); therefore, we
     * have to override the common code kvm_halt_in_kernel_allowed setting.
     */
    if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        kvm_gsi_routing_allowed = true;
        kvm_halt_in_kernel_allowed = false;
    }
}

int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
                                    int vq, bool assign)
{
    struct kvm_ioeventfd kick = {
        .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY |
                 KVM_IOEVENTFD_FLAG_DATAMATCH,
        .fd = event_notifier_get_fd(notifier),
        .datamatch = vq,
        .addr = sch,
        .len = 8,
    };
    trace_kvm_assign_subch_ioeventfd(kick.fd, kick.addr, assign,
                                     kick.datamatch);
    if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
}

int kvm_s390_get_protected_dump(void)
{
    return cap_protected_dump;
}

int kvm_s390_get_ri(void)
{
    return cap_ri;
}

int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
{
    struct kvm_mp_state mp_state = {};
    int ret;

    /* the kvm part might not have been initialized yet */
    if (CPU(cpu)->kvm_state == NULL) {
        return 0;
    }

    switch (cpu_state) {
    case S390_CPU_STATE_STOPPED:
        mp_state.mp_state = KVM_MP_STATE_STOPPED;
        break;
    case S390_CPU_STATE_CHECK_STOP:
        mp_state.mp_state = KVM_MP_STATE_CHECK_STOP;
        break;
    case S390_CPU_STATE_OPERATING:
        mp_state.mp_state = KVM_MP_STATE_OPERATING;
        break;
    case S390_CPU_STATE_LOAD:
        mp_state.mp_state = KVM_MP_STATE_LOAD;
        break;
    default:
        error_report("Requested CPU state is not a valid S390 CPU state: %u",
                     cpu_state);
        exit(1);
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
    if (ret) {
        trace_kvm_failed_cpu_state_set(CPU(cpu)->cpu_index, cpu_state,
                                       strerror(-ret));
    }

    return ret;
}

void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu)
{
    unsigned int max_cpus = MACHINE(qdev_get_machine())->smp.max_cpus;
    struct kvm_s390_irq_state irq_state = {
        .buf = (uint64_t) cpu->irqstate,
        .len = VCPU_IRQ_BUF_SIZE(max_cpus),
    };
    CPUState *cs = CPU(cpu);
    int32_t bytes;

    if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
        return;
    }

    bytes = kvm_vcpu_ioctl(cs, KVM_S390_GET_IRQ_STATE, &irq_state);
    if (bytes < 0) {
        cpu->irqstate_saved_size = 0;
        error_report("Migration of interrupt state failed");
        return;
    }

    cpu->irqstate_saved_size = bytes;
}

int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_s390_irq_state irq_state = {
        .buf = (uint64_t) cpu->irqstate,
        .len = cpu->irqstate_saved_size,
    };
    int r;

    if (cpu->irqstate_saved_size == 0) {
        return 0;
    }

    if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
        return -ENOSYS;
    }

    r = kvm_vcpu_ioctl(cs, KVM_S390_SET_IRQ_STATE, &irq_state);
    if (r) {
        error_report("Setting interrupt state failed %d", r);
    }
    return r;
}
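/*
 * Background for the route fixup below (summary, not from the original
 * source): zPCI MSI-X vectors are not delivered as classic MSI writes
 * but as s390 adapter interrupts. The vector number carried in the MSI
 * data is therefore folded into the indicator-bit offset of the adapter
 * route, so that each vector sets a distinct bit in the indicator area
 * the device registered earlier.
 */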
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    S390PCIBusDevice *pbdev;
    uint32_t vec = data & ZPCI_MSI_VEC_MASK;

    if (!dev) {
        DPRINTF("add_msi_route no pci device\n");
        return -ENODEV;
    }

    pbdev = s390_pci_find_dev_by_target(s390_get_phb(), DEVICE(dev)->id);
    if (!pbdev) {
        DPRINTF("add_msi_route no zpci device\n");
        return -ENODEV;
    }

    route->type = KVM_IRQ_ROUTING_S390_ADAPTER;
    route->flags = 0;
    route->u.adapter.summary_addr = pbdev->routes.adapter.summary_addr;
    route->u.adapter.ind_addr = pbdev->routes.adapter.ind_addr;
    route->u.adapter.summary_offset = pbdev->routes.adapter.summary_offset;
    route->u.adapter.ind_offset = pbdev->routes.adapter.ind_offset + vec;
    route->u.adapter.adapter_id = pbdev->routes.adapter.adapter_id;
    return 0;
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}

static int query_cpu_subfunc(S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_subfunc prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE_SUBFUNC,
        .addr = (uint64_t) &prop,
    };
    int rc;

    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        return rc;
    }

    /*
     * Add all subfunctions now, provided the corresponding feature that
     * unlocks the respective query function is available.
     */
    s390_add_from_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
    if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
    }
    if (test_bit(S390_FEAT_MSA, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
    }
    if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
    }
    if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
    }
    if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
    }
    if (test_bit(S390_FEAT_MSA_EXT_8, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma);
    }
    if (test_bit(S390_FEAT_MSA_EXT_9, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KDSA, prop.kdsa);
    }
    if (test_bit(S390_FEAT_ESORT_BASE, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_SORTL, prop.sortl);
    }
    if (test_bit(S390_FEAT_DEFLATE_BASE, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_DFLTCC, prop.dfltcc);
    }
    return 0;
}
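/*
 * Mirror image of query_cpu_subfunc(): translate the feature bitmap back
 * into the per-type subfunction blocks and hand them to KVM, so the guest
 * observes exactly the query results of the configured CPU model.
 */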
static int configure_cpu_subfunc(const S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_subfunc prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR_SUBFUNC,
        .addr = (uint64_t) &prop,
    };

    if (!kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                           KVM_S390_VM_CPU_PROCESSOR_SUBFUNC)) {
        /* hardware support might be missing, IBC will handle most of this */
        return 0;
    }

    s390_fill_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
    if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
    }
    if (test_bit(S390_FEAT_MSA, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
    }
    if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
    }
    if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
        s390_fill_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
    }
    if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
    }
    if (test_bit(S390_FEAT_MSA_EXT_8, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma);
    }
    if (test_bit(S390_FEAT_MSA_EXT_9, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KDSA, prop.kdsa);
    }
    if (test_bit(S390_FEAT_ESORT_BASE, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_SORTL, prop.sortl);
    }
    if (test_bit(S390_FEAT_DEFLATE_BASE, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_DFLTCC, prop.dfltcc);
    }
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

static int kvm_to_feat[][2] = {
    { KVM_S390_VM_CPU_FEAT_ESOP, S390_FEAT_ESOP },
    { KVM_S390_VM_CPU_FEAT_SIEF2, S390_FEAT_SIE_F2 },
    { KVM_S390_VM_CPU_FEAT_64BSCAO, S390_FEAT_SIE_64BSCAO },
    { KVM_S390_VM_CPU_FEAT_SIIF, S390_FEAT_SIE_SIIF },
    { KVM_S390_VM_CPU_FEAT_GPERE, S390_FEAT_SIE_GPERE },
    { KVM_S390_VM_CPU_FEAT_GSLS, S390_FEAT_SIE_GSLS },
    { KVM_S390_VM_CPU_FEAT_IB, S390_FEAT_SIE_IB },
    { KVM_S390_VM_CPU_FEAT_CEI, S390_FEAT_SIE_CEI },
    { KVM_S390_VM_CPU_FEAT_IBS, S390_FEAT_SIE_IBS },
    { KVM_S390_VM_CPU_FEAT_SKEY, S390_FEAT_SIE_SKEY },
    { KVM_S390_VM_CPU_FEAT_CMMA, S390_FEAT_SIE_CMMA },
    { KVM_S390_VM_CPU_FEAT_PFMFI, S390_FEAT_SIE_PFMFI },
    { KVM_S390_VM_CPU_FEAT_SIGPIF, S390_FEAT_SIE_SIGPIF },
    { KVM_S390_VM_CPU_FEAT_KSS, S390_FEAT_SIE_KSS },
};

static int query_cpu_feat(S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_feat prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE_FEAT,
        .addr = (uint64_t) &prop,
    };
    int rc;
    int i;

    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        return rc;
    }

    for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
        if (test_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat)) {
            set_bit(kvm_to_feat[i][1], features);
        }
    }
    return 0;
}
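/*
 * Inverse of query_cpu_feat(): mark every requested model feature in the
 * big-endian KVM bitmap and install it as the processor feature set.
 */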
static int configure_cpu_feat(const S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_feat prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR_FEAT,
        .addr = (uint64_t) &prop,
    };
    int i;

    for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
        if (test_bit(kvm_to_feat[i][1], features)) {
            set_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat);
        }
    }
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

bool kvm_s390_cpu_models_supported(void)
{
    if (!cpu_model_allowed()) {
        /* compatibility machines interfere with the cpu model */
        return false;
    }
    return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_MACHINE) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_PROCESSOR) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_MACHINE_FEAT) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_PROCESSOR_FEAT) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_MACHINE_SUBFUNC);
}
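/*
 * Derive the maximum CPU model the host can provide: query the basic
 * machine properties (CPUID, IBC), collect the STFL(E) facility mask,
 * CPU features and subfunctions, apply QEMU-specific fixups, and finally
 * match the result against the known CPU definitions.
 */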
void kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp)
{
    struct kvm_s390_vm_cpu_machine prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE,
        .addr = (uint64_t) &prop,
    };
    uint16_t unblocked_ibc = 0, cpu_type = 0;
    int rc;

    memset(model, 0, sizeof(*model));

    if (!kvm_s390_cpu_models_supported()) {
        error_setg(errp, "KVM doesn't support CPU models");
        return;
    }

    /* query the basic cpu model properties */
    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        error_setg(errp, "KVM: Error querying host CPU model: %d", rc);
        return;
    }

    cpu_type = cpuid_type(prop.cpuid);
    if (has_ibc(prop.ibc)) {
        model->lowest_ibc = lowest_ibc(prop.ibc);
        unblocked_ibc = unblocked_ibc(prop.ibc);
    }
    model->cpu_id = cpuid_id(prop.cpuid);
    model->cpu_id_format = cpuid_format(prop.cpuid);
    model->cpu_ver = 0xff;

    /* get supported cpu features indicated via STFL(E) */
    s390_add_from_feat_block(model->features, S390_FEAT_TYPE_STFL,
                             (uint8_t *) prop.fac_mask);
    /* dat-enhancement facility 2 has no bit but was introduced with stfle */
    if (test_bit(S390_FEAT_STFLE, model->features)) {
        set_bit(S390_FEAT_DAT_ENH_2, model->features);
    }
    /* get supported cpu features indicated e.g. via SCLP */
    rc = query_cpu_feat(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error querying CPU features: %d", rc);
        return;
    }
    /* get supported cpu subfunctions indicated via query / test bit */
    rc = query_cpu_subfunc(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error querying CPU subfunctions: %d", rc);
        return;
    }

    /* PTFF subfunctions might be indicated although kernel support is missing */
    if (!test_bit(S390_FEAT_MULTIPLE_EPOCH, model->features)) {
        clear_bit(S390_FEAT_PTFF_QSIE, model->features);
        clear_bit(S390_FEAT_PTFF_QTOUE, model->features);
        clear_bit(S390_FEAT_PTFF_STOE, model->features);
        clear_bit(S390_FEAT_PTFF_STOUE, model->features);
    }

    /* with cpu model support, CMM is only indicated if really available */
    if (kvm_s390_cmma_available()) {
        set_bit(S390_FEAT_CMM, model->features);
    } else {
        /* no cmm -> no cmm nt */
        clear_bit(S390_FEAT_CMM_NT, model->features);
    }

    /* bpb needs kernel support for migration, VSIE and reset */
    if (!kvm_check_extension(kvm_state, KVM_CAP_S390_BPB)) {
        clear_bit(S390_FEAT_BPB, model->features);
    }

    /*
     * If we have support for protected virtualization, indicate
     * the protected virtualization IPL unpack facility.
     */
    if (cap_protected) {
        set_bit(S390_FEAT_UNPACK, model->features);
    }

    /* We emulate a zPCI bus and AEN, therefore we don't need HW support */
    set_bit(S390_FEAT_ZPCI, model->features);
    set_bit(S390_FEAT_ADAPTER_EVENT_NOTIFICATION, model->features);

    if (s390_known_cpu_type(cpu_type)) {
        /* we want the exact model, even if some features are missing */
        model->def = s390_find_cpu_def(cpu_type, ibc_gen(unblocked_ibc),
                                       ibc_ec_ga(unblocked_ibc), NULL);
    } else {
        /* model unknown, e.g. too new - search using features */
        model->def = s390_find_cpu_def(0, ibc_gen(unblocked_ibc),
                                       ibc_ec_ga(unblocked_ibc),
                                       model->features);
    }
    if (!model->def) {
        error_setg(errp, "KVM: host CPU model could not be identified");
        return;
    }
    /* for now, we can only provide the AP feature with HW support */
    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO,
                          KVM_S390_VM_CRYPTO_ENABLE_APIE)) {
        set_bit(S390_FEAT_AP, model->features);
    }

    /*
     * Extended-Length SCCB is handled entirely within QEMU.
     * For PV guests this is completely fenced by the Ultravisor, as Service
     * Call error checking and STFLE interpretation are handled via SIE.
     */
    set_bit(S390_FEAT_EXTENDED_LENGTH_SCCB, model->features);

    if (kvm_check_extension(kvm_state, KVM_CAP_S390_DIAG318)) {
        set_bit(S390_FEAT_DIAG_318, model->features);
    }

    /* strip off features that are not part of the maximum model */
    bitmap_and(model->features, model->features, model->def->full_feat,
               S390_FEAT_MAX);
}
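/*
 * Toggle interpretive execution of AP (Adjunct Processor, i.e. crypto
 * adapter) instructions, provided the kernel allows switching the attribute.
 */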
static void kvm_s390_configure_apie(bool interpret)
{
    uint64_t attr = interpret ? KVM_S390_VM_CRYPTO_ENABLE_APIE :
                                KVM_S390_VM_CRYPTO_DISABLE_APIE;

    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
        kvm_s390_set_attr(attr);
    }
}

void kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp)
{
    struct kvm_s390_vm_cpu_processor prop = {
        .fac_list = { 0 },
    };
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR,
        .addr = (uint64_t) &prop,
    };
    int rc;

    if (!model) {
        /* compatibility handling if cpu models are disabled */
        if (kvm_s390_cmma_available()) {
            kvm_s390_enable_cmma();
        }
        return;
    }
    if (!kvm_s390_cpu_models_supported()) {
        error_setg(errp, "KVM doesn't support CPU models");
        return;
    }
    prop.cpuid = s390_cpuid_from_cpu_model(model);
    prop.ibc = s390_ibc_from_cpu_model(model);
    /* configure cpu features indicated via STFL(E) */
    s390_fill_feat_block(model->features, S390_FEAT_TYPE_STFL,
                         (uint8_t *) prop.fac_list);
    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    if (rc) {
        error_setg(errp, "KVM: Error configuring the CPU model: %d", rc);
        return;
    }
    /* configure cpu features indicated e.g. via SCLP */
    rc = configure_cpu_feat(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error configuring CPU features: %d", rc);
        return;
    }
    /* configure cpu subfunctions indicated via query / test bit */
    rc = configure_cpu_subfunc(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error configuring CPU subfunctions: %d", rc);
        return;
    }
    /* enable CMM via CMMA */
    if (test_bit(S390_FEAT_CMM, model->features)) {
        kvm_s390_enable_cmma();
    }

    if (test_bit(S390_FEAT_AP, model->features)) {
        kvm_s390_configure_apie(true);
    }
}

void kvm_s390_restart_interrupt(S390CPU *cpu)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_RESTART,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}

void kvm_s390_stop_interrupt(S390CPU *cpu)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_SIGP_STOP,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}

bool kvm_arch_cpu_check_are_resettable(void)
{
    return true;
}

int kvm_s390_get_zpci_op(void)
{
    return cap_zpci_op;
}

void kvm_arch_accel_class_init(ObjectClass *oc)
{
}