/*
 * QEMU S390x KVM implementation
 *
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright IBM Corp. 2012
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>
#include <asm/ptrace.h>

#include "cpu.h"
#include "s390x-internal.h"
#include "kvm_s390x.h"
#include "sysemu/kvm_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "qemu/units.h"
#include "qemu/main-loop.h"
#include "qemu/mmap-alloc.h"
#include "qemu/log.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "sysemu/runstate.h"
#include "sysemu/device_tree.h"
#include "exec/gdbstub.h"
#include "exec/ram_addr.h"
#include "trace.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/ipl.h"
#include "hw/s390x/ebcdic.h"
#include "exec/memattrs.h"
#include "hw/s390x/s390-virtio-ccw.h"
#include "hw/s390x/s390-virtio-hcall.h"
#include "hw/s390x/pv.h"

#ifndef DEBUG_KVM
#define DEBUG_KVM 0
#endif

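/*
 * Debug output helper: expands to a no-op unless DEBUG_KVM is defined as
 * non-zero at build time, so the fprintf() calls below compile away in
 * normal builds.
 */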
#define DPRINTF(fmt, ...) \
    do { \
        if (DEBUG_KVM) { \
            fprintf(stderr, fmt, ## __VA_ARGS__); \
        } \
    } while (0)

#define kvm_vm_check_mem_attr(s, attr) \
    kvm_vm_check_attr(s, KVM_S390_VM_MEM_CTRL, attr)

#define IPA0_DIAG 0x8300
#define IPA0_SIGP 0xae00
#define IPA0_B2 0xb200
#define IPA0_B9 0xb900
#define IPA0_EB 0xeb00
#define IPA0_E3 0xe300

#define PRIV_B2_SCLP_CALL 0x20
#define PRIV_B2_CSCH 0x30
#define PRIV_B2_HSCH 0x31
#define PRIV_B2_MSCH 0x32
#define PRIV_B2_SSCH 0x33
#define PRIV_B2_STSCH 0x34
#define PRIV_B2_TSCH 0x35
#define PRIV_B2_TPI 0x36
#define PRIV_B2_SAL 0x37
#define PRIV_B2_RSCH 0x38
#define PRIV_B2_STCRW 0x39
#define PRIV_B2_STCPS 0x3a
#define PRIV_B2_RCHP 0x3b
#define PRIV_B2_SCHM 0x3c
#define PRIV_B2_CHSC 0x5f
#define PRIV_B2_SIGA 0x74
#define PRIV_B2_XSCH 0x76

#define PRIV_EB_SQBS 0x8a
#define PRIV_EB_PCISTB 0xd0
#define PRIV_EB_SIC 0xd1

#define PRIV_B9_EQBS 0x9c
#define PRIV_B9_CLP 0xa0
#define PRIV_B9_PCISTG 0xd0
#define PRIV_B9_PCILG 0xd2
#define PRIV_B9_RPCIT 0xd3

#define PRIV_E3_MPCIFC 0xd0
#define PRIV_E3_STPCIFC 0xd4

#define DIAG_TIMEREVENT 0x288
#define DIAG_IPL 0x308
#define DIAG_SET_CONTROL_PROGRAM_CODES 0x318
#define DIAG_KVM_HYPERCALL 0x500
#define DIAG_KVM_BREAKPOINT 0x501

#define ICPT_INSTRUCTION 0x04
#define ICPT_PROGRAM 0x08
#define ICPT_EXT_INT 0x14
#define ICPT_WAITPSW 0x1c
#define ICPT_SOFT_INTERCEPT 0x24
#define ICPT_CPU_STOP 0x28
#define ICPT_OPEREXC 0x2c
#define ICPT_IO 0x40
#define ICPT_PV_INSTR 0x68
#define ICPT_PV_INSTR_NOTIFICATION 0x6c

#define NR_LOCAL_IRQS 32
/*
 * Needs to be big enough to contain max_cpus emergency signals
 * and in addition NR_LOCAL_IRQS interrupts
 */
#define VCPU_IRQ_BUF_SIZE(max_cpus) (sizeof(struct kvm_s390_irq) * \
                                     (max_cpus + NR_LOCAL_IRQS))
/*
 * KVM only supports memory slots of up to KVM_MEM_MAX_NR_PAGES pages,
 * as the dirty bitmap must be managed by bitops that take an int as
 * position indicator. This would end at an unaligned address
 * (0x7fffff00000). As future variants might provide larger pages
 * and to make all addresses properly aligned, let us split at 4TB.
 */
#define KVM_SLOT_MAX_BYTES (4UL * TiB)

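/*
 * Scratch watchpoint that is handed back to the common debug code via
 * cs->watchpoint_hit when a hardware watchpoint fires.
 */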
static CPUWatchpoint hw_watchpoint;
/*
 * We don't use a list because this structure is also used to transmit the
 * hardware breakpoints to the kernel.
 */
static struct kvm_hw_breakpoint *hw_breakpoints;
static int nb_hw_breakpoints;

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static int cap_sync_regs;
static int cap_async_pf;
static int cap_mem_op;
static int cap_mem_op_extension;
static int cap_s390_irq;
static int cap_ri;
static int cap_hpage_1m;
static int cap_vcpu_resets;
static int cap_protected;
static int cap_zpci_op;

static bool mem_op_storage_key_support;

static int active_cmma;

static int kvm_s390_query_mem_limit(uint64_t *memory_limit)
{
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr = (uint64_t) memory_limit,
    };

    return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
}

int kvm_s390_set_mem_limit(uint64_t new_limit, uint64_t *hw_limit)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr = (uint64_t) &new_limit,
    };

    if (!kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_LIMIT_SIZE)) {
        return 0;
    }

    rc = kvm_s390_query_mem_limit(hw_limit);
    if (rc) {
        return rc;
    } else if (*hw_limit < new_limit) {
        return -E2BIG;
    }

    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

int kvm_s390_cmma_active(void)
{
    return active_cmma;
}

static bool kvm_s390_cmma_available(void)
{
    static bool initialized, value;

    if (!initialized) {
        initialized = true;
        value = kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_ENABLE_CMMA) &&
                kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_CLR_CMMA);
    }
    return value;
}

void kvm_s390_cmma_reset(void)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_CLR_CMMA,
    };

    if (!kvm_s390_cmma_active()) {
        return;
    }

    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    trace_kvm_clear_cmma(rc);
}

static void kvm_s390_enable_cmma(void)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
    };

    if (cap_hpage_1m) {
        warn_report("CMM will not be enabled because it is not "
                    "compatible with huge memory backings.");
        return;
    }
    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    active_cmma = !rc;
    trace_kvm_enable_cmma(rc);
}

static void kvm_s390_set_attr(uint64_t attr)
{
    struct kvm_device_attr attribute = {
        .group = KVM_S390_VM_CRYPTO,
        .attr  = attr,
    };

    int ret = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute);

    if (ret) {
        error_report("Failed to set crypto device attribute %lu: %s",
                     attr, strerror(-ret));
    }
}

static void kvm_s390_init_aes_kw(void)
{
    uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_AES_KW;

    if (object_property_get_bool(OBJECT(qdev_get_machine()), "aes-key-wrap",
                                 NULL)) {
        attr = KVM_S390_VM_CRYPTO_ENABLE_AES_KW;
    }

    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
        kvm_s390_set_attr(attr);
    }
}

static void kvm_s390_init_dea_kw(void)
{
    uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_DEA_KW;

    if (object_property_get_bool(OBJECT(qdev_get_machine()), "dea-key-wrap",
                                 NULL)) {
        attr = KVM_S390_VM_CRYPTO_ENABLE_DEA_KW;
    }

    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
        kvm_s390_set_attr(attr);
    }
}

void kvm_s390_crypto_reset(void)
{
    if (s390_has_feat(S390_FEAT_MSA_EXT_3)) {
        kvm_s390_init_aes_kw();
        kvm_s390_init_dea_kw();
    }
}

void kvm_s390_set_max_pagesize(uint64_t pagesize, Error **errp)
{
    if (pagesize == 4 * KiB) {
        return;
    }

    if (!hpage_1m_allowed()) {
        error_setg(errp, "This QEMU machine does not support huge page "
                   "mappings");
        return;
    }

    if (pagesize != 1 * MiB) {
        error_setg(errp, "Memory backing with 2G pages was specified, "
                   "but KVM does not support this memory backing");
        return;
    }

    if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_HPAGE_1M, 0)) {
        error_setg(errp, "Memory backing with 1M pages was specified, "
                   "but KVM does not support this memory backing");
        return;
    }

    cap_hpage_1m = 1;
}

int kvm_s390_get_hpage_1m(void)
{
    return cap_hpage_1m;
}

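/* Default every CCW machine type to the "host" CPU model when running with KVM. */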
static void ccw_machine_class_foreach(ObjectClass *oc, void *opaque)
{
    MachineClass *mc = MACHINE_CLASS(oc);

    mc->default_cpu_type = S390_CPU_TYPE_NAME("host");
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    object_class_foreach(ccw_machine_class_foreach, TYPE_S390_CCW_MACHINE,
                         false, NULL);

    if (!kvm_check_extension(kvm_state, KVM_CAP_DEVICE_CTRL)) {
        error_report("KVM is missing capability KVM_CAP_DEVICE_CTRL - "
                     "please use kernel 3.15 or newer");
        return -1;
    }
    if (!kvm_check_extension(s, KVM_CAP_S390_COW)) {
        error_report("KVM is missing capability KVM_CAP_S390_COW - "
                     "unsupported environment");
        return -1;
    }

    cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
    cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);
    cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP);
    cap_mem_op_extension = kvm_check_extension(s, KVM_CAP_S390_MEM_OP_EXTENSION);
    mem_op_storage_key_support = cap_mem_op_extension > 0;
    cap_s390_irq = kvm_check_extension(s, KVM_CAP_S390_INJECT_IRQ);
    cap_vcpu_resets = kvm_check_extension(s, KVM_CAP_S390_VCPU_RESETS);
    cap_protected = kvm_check_extension(s, KVM_CAP_S390_PROTECTED);
    cap_zpci_op = kvm_check_extension(s, KVM_CAP_S390_ZPCI_OP);

    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0);
    kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0);
    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0);
    if (ri_allowed()) {
        if (kvm_vm_enable_cap(s, KVM_CAP_S390_RI, 0) == 0) {
            cap_ri = 1;
        }
    }
    if (cpu_model_allowed()) {
        kvm_vm_enable_cap(s, KVM_CAP_S390_GS, 0);
    }

    /*
     * The migration interface for ais was introduced with kernel 4.13
     * but the capability itself had been active since 4.12. As migration
     * support is considered necessary, we only try to enable this for
     * newer machine types if KVM_CAP_S390_AIS_MIGRATION is available.
     */
    if (cpu_model_allowed() && kvm_kernel_irqchip_allowed() &&
        kvm_check_extension(s, KVM_CAP_S390_AIS_MIGRATION)) {
        kvm_vm_enable_cap(s, KVM_CAP_S390_AIS, 0);
    }

    kvm_set_max_memslot_size(KVM_SLOT_MAX_BYTES);
    return 0;
}

int kvm_arch_irqchip_create(KVMState *s)
{
    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    unsigned int max_cpus = MACHINE(qdev_get_machine())->smp.max_cpus;
    S390CPU *cpu = S390_CPU(cs);
    kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
    cpu->irqstate = g_malloc0(VCPU_IRQ_BUF_SIZE(max_cpus));
    return 0;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);

    g_free(cpu->irqstate);
    cpu->irqstate = NULL;

    return 0;
}

static void kvm_s390_reset_vcpu(S390CPU *cpu, unsigned long type)
{
    CPUState *cs = CPU(cpu);

    /*
     * The reset call is needed here to reset in-kernel vcpu data that
     * we can't access directly from QEMU (i.e. with older kernels
     * which don't support sync_regs/ONE_REG). Before this ioctl
     * cpu_synchronize_state() is called in common kvm code
     * (kvm-all).
     */
    if (kvm_vcpu_ioctl(cs, type)) {
        error_report("CPU reset failed on CPU %i type %lx",
                     cs->cpu_index, type);
    }
}

void kvm_s390_reset_vcpu_initial(S390CPU *cpu)
{
    kvm_s390_reset_vcpu(cpu, KVM_S390_INITIAL_RESET);
}

void kvm_s390_reset_vcpu_clear(S390CPU *cpu)
{
    if (cap_vcpu_resets) {
        kvm_s390_reset_vcpu(cpu, KVM_S390_CLEAR_RESET);
    } else {
        kvm_s390_reset_vcpu(cpu, KVM_S390_INITIAL_RESET);
    }
}

void kvm_s390_reset_vcpu_normal(S390CPU *cpu)
{
    if (cap_vcpu_resets) {
        kvm_s390_reset_vcpu(cpu, KVM_S390_NORMAL_RESET);
    }
}

static int can_sync_regs(CPUState *cs, int regs)
{
    return cap_sync_regs && (cs->kvm_run->kvm_valid_regs & regs) == regs;
}

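/*
 * Copy QEMU's view of the CPU state into the kernel. Wherever the kernel
 * exposes a piece of state through the kvm_run synced-register area we use
 * that and flag it dirty; otherwise we fall back to the corresponding
 * ioctl or ONE_REG interface.
 */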
int kvm_arch_put_registers(CPUState *cs, int level)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu = {};
    int r;
    int i;

    /* always save the PSW and the GPRS */
    cs->kvm_run->psw_addr = env->psw.addr;
    cs->kvm_run->psw_mask = env->psw.mask;

    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.gprs[i] = env->regs[i];
            cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
        }
    } else {
        for (i = 0; i < 16; i++) {
            regs.gprs[i] = env->regs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
        if (r < 0) {
            return r;
        }
    }

    if (can_sync_regs(cs, KVM_SYNC_VRS)) {
        for (i = 0; i < 32; i++) {
            cs->kvm_run->s.regs.vrs[i][0] = env->vregs[i][0];
            cs->kvm_run->s.regs.vrs[i][1] = env->vregs[i][1];
        }
        cs->kvm_run->s.regs.fpc = env->fpc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_VRS;
    } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.fprs[i] = *get_freg(env, i);
        }
        cs->kvm_run->s.regs.fpc = env->fpc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_FPRS;
    } else {
        /* Floating point */
        for (i = 0; i < 16; i++) {
            fpu.fprs[i] = *get_freg(env, i);
        }
        fpu.fpc = env->fpc;

        r = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
        if (r < 0) {
            return r;
        }
    }

    /* Do we need to save more than that? */
    if (level == KVM_PUT_RUNTIME_STATE) {
        return 0;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        cs->kvm_run->s.regs.cputm = env->cputm;
        cs->kvm_run->s.regs.ckc = env->ckc;
        cs->kvm_run->s.regs.todpr = env->todpr;
        cs->kvm_run->s.regs.gbea = env->gbea;
        cs->kvm_run->s.regs.pp = env->pp;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ARCH0;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
        memcpy(cs->kvm_run->s.regs.riccb, env->riccb, 64);
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_RICCB;
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        cs->kvm_run->s.regs.pft = env->pfault_token;
        cs->kvm_run->s.regs.pfs = env->pfault_select;
        cs->kvm_run->s.regs.pfc = env->pfault_compare;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PFAULT;
    } else if (cap_async_pf) {
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    /* access registers and control registers */
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
            cs->kvm_run->s.regs.crs[i] = env->cregs[i];
        }
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS;
    } else {
        for (i = 0; i < 16; i++) {
            sregs.acrs[i] = env->aregs[i];
            sregs.crs[i] = env->cregs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
    }

    if (can_sync_regs(cs, KVM_SYNC_GSCB)) {
        memcpy(cs->kvm_run->s.regs.gscb, env->gscb, 32);
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GSCB;
    }

    if (can_sync_regs(cs, KVM_SYNC_BPBC)) {
        cs->kvm_run->s.regs.bpbc = env->bpbc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_BPBC;
    }

    if (can_sync_regs(cs, KVM_SYNC_ETOKEN)) {
        cs->kvm_run->s.regs.etoken = env->etoken;
        cs->kvm_run->s.regs.etoken_extension = env->etoken_extension;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ETOKEN;
    }

    if (can_sync_regs(cs, KVM_SYNC_DIAG318)) {
        cs->kvm_run->s.regs.diag318 = env->diag318_info;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_DIAG318;
    }

    /* Finally the prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        cs->kvm_run->s.regs.prefix = env->psa;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
    } else {
        /* prefix is only supported via sync regs */
    }
    return 0;
}

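/*
 * Counterpart of kvm_arch_put_registers(): fetch the CPU state from the
 * kernel, again preferring the synced-register area over the individual
 * ioctl/ONE_REG interfaces.
 */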
int kvm_arch_get_registers(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu;
    int i, r;

    /* get the PSW */
    env->psw.addr = cs->kvm_run->psw_addr;
    env->psw.mask = cs->kvm_run->psw_mask;

    /* the GPRS */
    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            env->regs[i] = cs->kvm_run->s.regs.gprs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->regs[i] = regs.gprs[i];
        }
    }

    /* The ACRS and CRS */
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
            env->cregs[i] = cs->kvm_run->s.regs.crs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->aregs[i] = sregs.acrs[i];
            env->cregs[i] = sregs.crs[i];
        }
    }

    /* Floating point and vector registers */
    if (can_sync_regs(cs, KVM_SYNC_VRS)) {
        for (i = 0; i < 32; i++) {
            env->vregs[i][0] = cs->kvm_run->s.regs.vrs[i][0];
            env->vregs[i][1] = cs->kvm_run->s.regs.vrs[i][1];
        }
        env->fpc = cs->kvm_run->s.regs.fpc;
    } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
        for (i = 0; i < 16; i++) {
            *get_freg(env, i) = cs->kvm_run->s.regs.fprs[i];
        }
        env->fpc = cs->kvm_run->s.regs.fpc;
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            *get_freg(env, i) = fpu.fprs[i];
        }
        env->fpc = fpu.fpc;
    }

    /* The prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        env->psa = cs->kvm_run->s.regs.prefix;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        env->cputm = cs->kvm_run->s.regs.cputm;
        env->ckc = cs->kvm_run->s.regs.ckc;
        env->todpr = cs->kvm_run->s.regs.todpr;
        env->gbea = cs->kvm_run->s.regs.gbea;
        env->pp = cs->kvm_run->s.regs.pp;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
        memcpy(env->riccb, cs->kvm_run->s.regs.riccb, 64);
    }

    if (can_sync_regs(cs, KVM_SYNC_GSCB)) {
        memcpy(env->gscb, cs->kvm_run->s.regs.gscb, 32);
    }

    if (can_sync_regs(cs, KVM_SYNC_BPBC)) {
        env->bpbc = cs->kvm_run->s.regs.bpbc;
    }

    if (can_sync_regs(cs, KVM_SYNC_ETOKEN)) {
        env->etoken = cs->kvm_run->s.regs.etoken;
        env->etoken_extension = cs->kvm_run->s.regs.etoken_extension;
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        env->pfault_token = cs->kvm_run->s.regs.pft;
        env->pfault_select = cs->kvm_run->s.regs.pfs;
        env->pfault_compare = cs->kvm_run->s.regs.pfc;
    } else if (cap_async_pf) {
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    if (can_sync_regs(cs, KVM_SYNC_DIAG318)) {
        env->diag318_info = cs->kvm_run->s.regs.diag318;
    }

    return 0;
}

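/*
 * TOD-clock accessors: the plain variants transfer the low 64 bits and the
 * high epoch byte in two KVM_S390_VM_TOD attribute calls, while the _ext
 * variants use KVM_S390_VM_TOD_EXT to transfer epoch index and TOD value
 * in one go.
 */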
int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
}

int kvm_s390_get_clock_ext(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;
    struct kvm_s390_vm_tod_clock gtod;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_EXT,
        .addr = (uint64_t)&gtod,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    *tod_high = gtod.epoch_idx;
    *tod_low = gtod.tod;

    return r;
}

int kvm_s390_set_clock(uint8_t tod_high, uint64_t tod_low)
{
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)&tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)&tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

int kvm_s390_set_clock_ext(uint8_t tod_high, uint64_t tod_low)
{
    struct kvm_s390_vm_tod_clock gtod = {
        .epoch_idx = tod_high,
        .tod = tod_low,
    };
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_EXT,
        .addr = (uint64_t)&gtod,
    };

    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

/**
 * kvm_s390_mem_op:
 * @addr:      the logical start address in guest memory
 * @ar:        the access register number
 * @hostbuf:   buffer in host memory. NULL = do only checks w/o copying
 * @len:       length that should be transferred
 * @is_write:  true = write, false = read
 * Returns:    0 on success, non-zero if an exception or error occurred
 *
 * Use KVM ioctl to read/write from/to guest memory. An access exception
 * is injected into the vCPU in case of translation errors.
 */
int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
                    int len, bool is_write)
{
    struct kvm_s390_mem_op mem_op = {
        .gaddr = addr,
        .flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION,
        .size = len,
        .op = is_write ? KVM_S390_MEMOP_LOGICAL_WRITE
                       : KVM_S390_MEMOP_LOGICAL_READ,
        .buf = (uint64_t)hostbuf,
        .ar = ar,
        .key = (cpu->env.psw.mask & PSW_MASK_KEY) >> PSW_SHIFT_KEY,
    };
    int ret;

    if (!cap_mem_op) {
        return -ENOSYS;
    }
    if (!hostbuf) {
        mem_op.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
    }
    if (mem_op_storage_key_support) {
        mem_op.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
    if (ret < 0) {
        warn_report("KVM_S390_MEM_OP failed: %s", strerror(-ret));
    }
    return ret;
}

int kvm_s390_mem_op_pv(S390CPU *cpu, uint64_t offset, void *hostbuf,
                       int len, bool is_write)
{
    struct kvm_s390_mem_op mem_op = {
        .sida_offset = offset,
        .size = len,
        .op = is_write ? KVM_S390_MEMOP_SIDA_WRITE
                       : KVM_S390_MEMOP_SIDA_READ,
        .buf = (uint64_t)hostbuf,
    };
    int ret;

    if (!cap_mem_op || !cap_protected) {
        return -ENOSYS;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
    if (ret < 0) {
        error_report("KVM_S390_MEM_OP failed: %s", strerror(-ret));
        abort();
    }
    return ret;
}

static uint8_t const *sw_bp_inst;
static uint8_t sw_bp_ilen;

static void determine_sw_breakpoint_instr(void)
{
    /* DIAG 501 is used for sw breakpoints with old kernels */
    static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};
    /* Instruction 0x0000 is used for sw breakpoints with recent kernels */
    static const uint8_t instr_0x0000[] = {0x00, 0x00};

    if (sw_bp_inst) {
        return;
    }
    if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_USER_INSTR0, 0)) {
        sw_bp_inst = diag_501;
        sw_bp_ilen = sizeof(diag_501);
        DPRINTF("KVM: will use 4-byte sw breakpoints.\n");
    } else {
        sw_bp_inst = instr_0x0000;
        sw_bp_ilen = sizeof(instr_0x0000);
        DPRINTF("KVM: will use 2-byte sw breakpoints.\n");
    }
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    determine_sw_breakpoint_instr();

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                            sw_bp_ilen, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)sw_bp_inst, sw_bp_ilen, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t t[MAX_ILEN];

    if (cpu_memory_rw_debug(cs, bp->pc, t, sw_bp_ilen, 0)) {
        return -EINVAL;
    } else if (memcmp(t, sw_bp_inst, sw_bp_ilen)) {
        return -EINVAL;
    } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                                   sw_bp_ilen, 1)) {
        return -EINVAL;
    }

    return 0;
}

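/* Linear scan of the hardware breakpoint array; len == -1 matches any length. */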
static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr,
                                                    int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoints; n++) {
        if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type &&
            (hw_breakpoints[n].len == len || len == -1)) {
            return &hw_breakpoints[n];
        }
    }

    return NULL;
}

static int insert_hw_breakpoint(target_ulong addr, int len, int type)
{
    int size;

    if (find_hw_breakpoint(addr, len, type)) {
        return -EEXIST;
    }

    size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint);

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size);
    } else {
        hw_breakpoints =
            (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size);
    }

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        return -ENOMEM;
    }

    hw_breakpoints[nb_hw_breakpoints].addr = addr;
    hw_breakpoints[nb_hw_breakpoints].len = len;
    hw_breakpoints[nb_hw_breakpoints].type = type;

    nb_hw_breakpoints++;

    return 0;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        type = KVM_HW_BP;
        break;
    case GDB_WATCHPOINT_WRITE:
        if (len < 1) {
            return -EINVAL;
        }
        type = KVM_HW_WP_WRITE;
        break;
    default:
        return -ENOSYS;
    }
    return insert_hw_breakpoint(addr, len, type);
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int size;
    struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type);

    if (bp == NULL) {
        return -ENOENT;
    }

    nb_hw_breakpoints--;
    if (nb_hw_breakpoints > 0) {
        /*
         * In order to trim the array, move the last element to the position to
         * be removed - if necessary.
         */
        if (bp != &hw_breakpoints[nb_hw_breakpoints]) {
            *bp = hw_breakpoints[nb_hw_breakpoints];
        }
        size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint);
        hw_breakpoints =
            (struct kvm_hw_breakpoint *)g_realloc(hw_breakpoints, size);
    } else {
        g_free(hw_breakpoints);
        hw_breakpoints = NULL;
    }

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoints = 0;
    g_free(hw_breakpoints);
    hw_breakpoints = NULL;
}

void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    int i;

    if (nb_hw_breakpoints > 0) {
        dbg->arch.nr_hw_bp = nb_hw_breakpoints;
        dbg->arch.hw_bp = hw_breakpoints;

        for (i = 0; i < nb_hw_breakpoints; ++i) {
            hw_breakpoints[i].phys_addr = s390_cpu_get_phys_addr_debug(cpu,
                                                       hw_breakpoints[i].addr);
        }
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
    } else {
        dbg->arch.nr_hw_bp = 0;
        dbg->arch.hw_bp = NULL;
    }
}

void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}

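/*
 * Translate the rich kvm_s390_irq representation into the legacy
 * kvm_s390_interrupt format for kernels without KVM_CAP_S390_INJECT_IRQ.
 */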
static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq,
                                     struct kvm_s390_interrupt *interrupt)
{
    int r = 0;

    interrupt->type = irq->type;
    switch (irq->type) {
    case KVM_S390_INT_VIRTIO:
        interrupt->parm = irq->u.ext.ext_params;
        /* fall through */
    case KVM_S390_INT_PFAULT_INIT:
    case KVM_S390_INT_PFAULT_DONE:
        interrupt->parm64 = irq->u.ext.ext_params2;
        break;
    case KVM_S390_PROGRAM_INT:
        interrupt->parm = irq->u.pgm.code;
        break;
    case KVM_S390_SIGP_SET_PREFIX:
        interrupt->parm = irq->u.prefix.address;
        break;
    case KVM_S390_INT_SERVICE:
        interrupt->parm = irq->u.ext.ext_params;
        break;
    case KVM_S390_MCHK:
        interrupt->parm = irq->u.mchk.cr14;
        interrupt->parm64 = irq->u.mchk.mcic;
        break;
    case KVM_S390_INT_EXTERNAL_CALL:
        interrupt->parm = irq->u.extcall.code;
        break;
    case KVM_S390_INT_EMERGENCY:
        interrupt->parm = irq->u.emerg.code;
        break;
    case KVM_S390_SIGP_STOP:
    case KVM_S390_RESTART:
        break; /* These types have no parameters */
    case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
        interrupt->parm = irq->u.io.subchannel_id << 16;
        interrupt->parm |= irq->u.io.subchannel_nr;
        interrupt->parm64 = (uint64_t)irq->u.io.io_int_parm << 32;
        interrupt->parm64 |= irq->u.io.io_int_word;
        break;
    default:
        r = -EINVAL;
        break;
    }
    return r;
}

static void inject_vcpu_irq_legacy(CPUState *cs, struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq)
{
    CPUState *cs = CPU(cpu);
    int r;

    if (cap_s390_irq) {
        r = kvm_vcpu_ioctl(cs, KVM_S390_IRQ, irq);
        if (!r) {
            return;
        }
        error_report("KVM failed to inject interrupt %llx", irq->type);
        exit(1);
    }

    inject_vcpu_irq_legacy(cs, irq);
}

void kvm_s390_floating_interrupt_legacy(struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vm_ioctl(kvm_state, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

void kvm_s390_program_interrupt(S390CPU *cpu, uint16_t code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
    };
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  cpu->env.psw.addr);
    kvm_s390_vcpu_interrupt(cpu, &irq);
}

void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
        .u.pgm.trans_exc_code = te_code,
        .u.pgm.exc_access_id = te_code & 3,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}

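/*
 * SCLP service call handling: for protected (PV) guests the SCCB is
 * processed on the Ultravisor-mediated path and the CC is set there; for
 * normal guests we emulate the call and set the CC ourselves.
 */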
static void kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
                                  uint16_t ipbh0)
{
    CPUS390XState *env = &cpu->env;
    uint64_t sccb;
    uint32_t code;
    int r;

    sccb = env->regs[ipbh0 & 0xf];
    code = env->regs[(ipbh0 & 0xf0) >> 4];

    switch (run->s390_sieic.icptcode) {
    case ICPT_PV_INSTR_NOTIFICATION:
        g_assert(s390_is_pv());
        /* The notification intercepts are currently handled by KVM */
        error_report("unexpected SCLP PV notification");
        exit(1);
        break;
    case ICPT_PV_INSTR:
        g_assert(s390_is_pv());
        sclp_service_call_protected(env, sccb, code);
        /* Setting the CC is done by the Ultravisor. */
        break;
    case ICPT_INSTRUCTION:
        g_assert(!s390_is_pv());
        r = sclp_service_call(env, sccb, code);
        if (r < 0) {
            kvm_s390_program_interrupt(cpu, -r);
            return;
        }
        setcc(cpu, r);
    }
}

static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    int rc = 0;
    uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;

    switch (ipa1) {
    case PRIV_B2_XSCH:
        ioinst_handle_xsch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_CSCH:
        ioinst_handle_csch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_HSCH:
        ioinst_handle_hsch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_MSCH:
        ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_SSCH:
        ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_STCRW:
        ioinst_handle_stcrw(cpu, run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_STSCH:
        ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_TSCH:
        /* We should only get tsch via KVM_EXIT_S390_TSCH. */
        fprintf(stderr, "Spurious tsch intercept\n");
        break;
    case PRIV_B2_CHSC:
        ioinst_handle_chsc(cpu, run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_TPI:
        /* This should have been handled by kvm already. */
        fprintf(stderr, "Spurious tpi intercept\n");
        break;
    case PRIV_B2_SCHM:
        ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
                           run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_RSCH:
        ioinst_handle_rsch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_RCHP:
        ioinst_handle_rchp(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_STCPS:
        /* We do not provide this instruction, it is suppressed. */
        break;
    case PRIV_B2_SAL:
        ioinst_handle_sal(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_SIGA:
        /* Not provided, set CC = 3 for subchannel not operational */
        setcc(cpu, 3);
        break;
    case PRIV_B2_SCLP_CALL:
        kvm_sclp_service_call(cpu, run, ipbh0);
        break;
    default:
        rc = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb2%x\n", ipa1);
        break;
    }

    return rc;
}

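/*
 * Decode the effective address of an intercepted RXY-format instruction
 * from the IPA/IPB fields; the 20-bit displacement is sign-extended.
 */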
static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run,
                                  uint8_t *ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t x2 = (run->s390_sieic.ipa & 0x000f);
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }
    if (ar) {
        *ar = base2;
    }

    return (base2 ? env->regs[base2] : 0) +
           (x2 ? env->regs[x2] : 0) + (long)(int)disp2;
}

static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run,
                                  uint8_t *ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }
    if (ar) {
        *ar = base2;
    }

    return (base2 ? env->regs[base2] : 0) + (long)(int)disp2;
}

static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return clp_service_call(cpu, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return pcilg_service_call(cpu, r1, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return pcistg_service_call(cpu, r1, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;
    uint8_t ar;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        fiba = get_base_disp_rxy(cpu, run, &ar);

        return stpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint8_t r3 = run->s390_sieic.ipa & 0x000f;
    uint8_t isc;
    uint16_t mode;
    int r;

    mode = env->regs[r1] & 0xffff;
    isc = (env->regs[r3] >> 27) & 0x7;
    r = css_do_sic(env, isc, mode);
    if (r) {
        kvm_s390_program_interrupt(cpu, -r);
    }

    return 0;
}

static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return rpcit_service_call(cpu, r1, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint8_t r3 = run->s390_sieic.ipa & 0x000f;
    uint64_t gaddr;
    uint8_t ar;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        gaddr = get_base_disp_rsy(cpu, run, &ar);

        return pcistb_service_call(cpu, r1, r3, gaddr, ar, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;
    uint8_t ar;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        fiba = get_base_disp_rxy(cpu, run, &ar);

        return mpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED);
    } else {
        return -1;
    }
}

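/*
 * Dispatchers for the 0xb9/0xeb/0xe3 privileged instruction groups;
 * returning -1 makes the caller inject an operation exception.
 */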
static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    int r = 0;

    switch (ipa1) {
    case PRIV_B9_CLP:
        r = kvm_clp_service_call(cpu, run);
        break;
    case PRIV_B9_PCISTG:
        r = kvm_pcistg_service_call(cpu, run);
        break;
    case PRIV_B9_PCILG:
        r = kvm_pcilg_service_call(cpu, run);
        break;
    case PRIV_B9_RPCIT:
        r = kvm_rpcit_service_call(cpu, run);
        break;
    case PRIV_B9_EQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb9%x\n", ipa1);
        break;
    }

    return r;
}

static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_EB_PCISTB:
        r = kvm_pcistb_service_call(cpu, run);
        break;
    case PRIV_EB_SIC:
        r = kvm_sic_service_call(cpu, run);
        break;
    case PRIV_EB_SQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xeb%x\n", ipbl);
        break;
    }

    return r;
}

static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_E3_MPCIFC:
        r = kvm_mpcifc_service_call(cpu, run);
        break;
    case PRIV_E3_STPCIFC:
        r = kvm_stpcifc_service_call(cpu, run);
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xe3%x\n", ipbl);
        break;
    }

    return r;
}

static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    int ret;

    ret = s390_virtio_hypercall(env);
    if (ret == -EINVAL) {
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
        return 0;
    }

    return ret;
}

static void kvm_handle_diag_288(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;
    int rc;

    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    rc = handle_diag_288(&cpu->env, r1, r3);
    if (rc) {
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
    }
}

static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;

    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    handle_diag_308(&cpu->env, r1, r3, RA_IGNORED);
}

static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    unsigned long pc;

    pc = env->psw.addr - sw_bp_ilen;
    if (kvm_find_sw_breakpoint(CPU(cpu), pc)) {
        env->psw.addr = pc;
        return EXCP_DEBUG;
    }

    return -ENOENT;
}

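/* Propagate the diag 318 control program code to the kernel via sync regs. */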
void kvm_s390_set_diag318(CPUState *cs, uint64_t diag318_info)
{
    CPUS390XState *env = &S390_CPU(cs)->env;

    /* Feat bit is set only if KVM supports sync for diag318 */
    if (s390_has_feat(S390_FEAT_DIAG_318)) {
        env->diag318_info = diag318_info;
        cs->kvm_run->s.regs.diag318 = diag318_info;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_DIAG318;
        /*
         * diag 318 info is zeroed during a clear reset and
         * diag 308 IPL subcodes.
         */
    }
}

static void handle_diag_318(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t reg = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t diag318_info = run->s.regs.gprs[reg];
    CPUState *t;

    /*
     * DIAG 318 can only be enabled with KVM support. As such, let's
     * ensure a guest cannot execute this instruction erroneously.
     */
    if (!s390_has_feat(S390_FEAT_DIAG_318)) {
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
        return;
    }

    CPU_FOREACH(t) {
        run_on_cpu(t, s390_do_cpu_set_diag318,
                   RUN_ON_CPU_HOST_ULONG(diag318_info));
    }
}

#define DIAG_KVM_CODE_MASK 0x000000000000ffff

static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
{
    int r = 0;
    uint16_t func_code;

    /*
     * For any diagnose call we support, bits 48-63 of the resulting
     * address specify the function code; the remainder is ignored.
     */
    func_code = decode_basedisp_rs(&cpu->env, ipb, NULL) & DIAG_KVM_CODE_MASK;
    switch (func_code) {
    case DIAG_TIMEREVENT:
        kvm_handle_diag_288(cpu, run);
        break;
    case DIAG_IPL:
        kvm_handle_diag_308(cpu, run);
        break;
    case DIAG_SET_CONTROL_PROGRAM_CODES:
        handle_diag_318(cpu, run);
        break;
    case DIAG_KVM_HYPERCALL:
        r = handle_hypercall(cpu, run);
        break;
    case DIAG_KVM_BREAKPOINT:
        r = handle_sw_breakpoint(cpu, run);
        break;
    default:
        DPRINTF("KVM: unknown DIAG: 0x%x\n", func_code);
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
        break;
    }

    return r;
}

static int kvm_s390_handle_sigp(S390CPU *cpu, uint8_t ipa1, uint32_t ipb)
{
    CPUS390XState *env = &cpu->env;
    const uint8_t r1 = ipa1 >> 4;
    const uint8_t r3 = ipa1 & 0x0f;
    int ret;
    uint8_t order;

    /* get order code */
    order = decode_basedisp_rs(env, ipb, NULL) & SIGP_ORDER_MASK;

    ret = handle_sigp(env, order, r1, r3);
    setcc(cpu, ret);
    return 0;
}

static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
{
    unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00);
    uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff;
    int r = -1;

    DPRINTF("handle_instruction 0x%x 0x%x\n",
            run->s390_sieic.ipa, run->s390_sieic.ipb);
    switch (ipa0) {
    case IPA0_B2:
        r = handle_b2(cpu, run, ipa1);
        break;
    case IPA0_B9:
        r = handle_b9(cpu, run, ipa1);
        break;
    case IPA0_EB:
        r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_E3:
        r = handle_e3(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_DIAG:
        r = handle_diag(cpu, run, run->s390_sieic.ipb);
        break;
    case IPA0_SIGP:
        r = kvm_s390_handle_sigp(cpu, ipa1, run->s390_sieic.ipb);
        break;
    }

    if (r < 0) {
        r = 0;
        kvm_s390_program_interrupt(cpu, PGM_OPERATION);
    }

    return r;
}

static void unmanageable_intercept(S390CPU *cpu, S390CrashReason reason,
                                   int pswoffset)
{
    CPUState *cs = CPU(cpu);

    s390_cpu_halt(cpu);
    cpu->env.crash_reason = reason;
    qemu_system_guest_panicked(cpu_get_crash_info(cs));
}

/* try to detect pgm check loops */
static int handle_oper_loop(S390CPU *cpu, struct kvm_run *run)
{
    CPUState *cs = CPU(cpu);
    PSW oldpsw, newpsw;

    newpsw.mask = ldq_phys(cs->as, cpu->env.psa +
                           offsetof(LowCore, program_new_psw));
    newpsw.addr = ldq_phys(cs->as, cpu->env.psa +
                           offsetof(LowCore, program_new_psw) + 8);
    oldpsw.mask = run->psw_mask;
    oldpsw.addr = run->psw_addr;
    /*
     * Avoid endless loops of operation exceptions, if the pgm new
     * PSW will cause a new operation exception.
     * The heuristic checks if the pgm new psw is within 6 bytes before
     * the faulting psw address (with same DAT, AS settings) and the
     * new psw is not a wait psw and the fault was not triggered by
     * problem state. In that case go into crashed state.
     */

    if (oldpsw.addr - newpsw.addr <= 6 &&
        !(newpsw.mask & PSW_MASK_WAIT) &&
        !(oldpsw.mask & PSW_MASK_PSTATE) &&
        (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
        (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT)) {
        unmanageable_intercept(cpu, S390_CRASH_REASON_OPINT_LOOP,
                               offsetof(LowCore, program_new_psw));
        return EXCP_HALTED;
    }
    return 0;
}

static int handle_intercept(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int icpt_code = run->s390_sieic.icptcode;
    int r = 0;

    DPRINTF("intercept: 0x%x (at 0x%lx)\n", icpt_code, (long)run->psw_addr);
    switch (icpt_code) {
    case ICPT_INSTRUCTION:
    case ICPT_PV_INSTR:
    case ICPT_PV_INSTR_NOTIFICATION:
        r = handle_instruction(cpu, run);
        break;
    case ICPT_PROGRAM:
        unmanageable_intercept(cpu, S390_CRASH_REASON_PGMINT_LOOP,
                               offsetof(LowCore, program_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_EXT_INT:
        unmanageable_intercept(cpu, S390_CRASH_REASON_EXTINT_LOOP,
                               offsetof(LowCore, external_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_WAITPSW:
        /* disabled wait, since enabled wait is handled in kernel */
        s390_handle_wait(cpu);
        r = EXCP_HALTED;
        break;
    case ICPT_CPU_STOP:
        do_stop_interrupt(&cpu->env);
        r = EXCP_HALTED;
        break;
    case ICPT_OPEREXC:
        /* check for break points */
        r = handle_sw_breakpoint(cpu, run);
        if (r == -ENOENT) {
            /* Then check for potential pgm check loops */
            r = handle_oper_loop(cpu, run);
            if (r == 0) {
                kvm_s390_program_interrupt(cpu, PGM_OPERATION);
            }
        }
        break;
    case ICPT_SOFT_INTERCEPT:
        fprintf(stderr, "KVM unimplemented icpt SOFT\n");
        exit(1);
        break;
    case ICPT_IO:
        fprintf(stderr, "KVM unimplemented icpt IO\n");
        exit(1);
        break;
    default:
        fprintf(stderr, "Unknown intercept code: %d\n", icpt_code);
        exit(1);
        break;
    }

    return r;
}

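/*
 * Complete a TEST SUBCHANNEL intercept. If the instruction emulation
 * fails, a dequeued I/O interrupt has to be pushed back so that it is
 * not lost.
 */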
static int handle_tsch(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int ret;

    ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb,
                             RA_IGNORED);
    if (ret < 0) {
        /*
         * Failure.
         * If an I/O interrupt had been dequeued, we have to reinject it.
         */
        if (run->s390_tsch.dequeued) {
            s390_io_interrupt(run->s390_tsch.subchannel_id,
                              run->s390_tsch.subchannel_nr,
                              run->s390_tsch.io_int_parm,
                              run->s390_tsch.io_int_word);
        }
        ret = 0;
    }
    return ret;
}

static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr, uint8_t ar)
{
    const MachineState *ms = MACHINE(qdev_get_machine());
    uint16_t conf_cpus = 0, reserved_cpus = 0;
    SysIB_322 sysib;
    int del, i;

    if (s390_is_pv()) {
        s390_cpu_pv_mem_read(cpu, 0, &sysib, sizeof(sysib));
    } else if (s390_cpu_virt_mem_read(cpu, addr, ar, &sysib, sizeof(sysib))) {
        return;
    }
    /* Shift the stack of Extended Names to prepare for our own data */
    memmove(&sysib.ext_names[1], &sysib.ext_names[0],
            sizeof(sysib.ext_names[0]) * (sysib.count - 1));
    /*
     * The first virtualization level that does not provide Extended Names
     * delimits the stack; it is assumed to be incapable of managing
     * Extended Names for lower levels.
     */
    for (del = 1; del < sysib.count; del++) {
        if (!sysib.vm[del].ext_name_encoding || !sysib.ext_names[del][0]) {
            break;
        }
    }
    if (del < sysib.count) {
        memset(sysib.ext_names[del], 0,
               sizeof(sysib.ext_names[0]) * (sysib.count - del));
    }

    /* count the cpus and split them into configured and reserved ones */
    for (i = 0; i < ms->possible_cpus->len; i++) {
        if (ms->possible_cpus->cpus[i].cpu) {
            conf_cpus++;
        } else {
            reserved_cpus++;
        }
    }
    sysib.vm[0].total_cpus = conf_cpus + reserved_cpus;
    sysib.vm[0].conf_cpus = conf_cpus;
    sysib.vm[0].reserved_cpus = reserved_cpus;

    /* Insert short machine name in EBCDIC, padded with blanks */
    if (qemu_name) {
        memset(sysib.vm[0].name, 0x40, sizeof(sysib.vm[0].name));
        ebcdic_put(sysib.vm[0].name, qemu_name, MIN(sizeof(sysib.vm[0].name),
                                                    strlen(qemu_name)));
    }
    sysib.vm[0].ext_name_encoding = 2; /* 2 = UTF-8 */
    /*
     * If a hypervisor specifies a zero Extended Name in the STSI 3.2.2
     * SYSIB, it is considered incapable of providing any Extended Name.
     * Therefore, if no name was specified on the qemu invocation, we go
     * with the same "KVMguest" default that KVM has filled into the short
     * name field.
     */
    strpadcpy((char *)sysib.ext_names[0],
              sizeof(sysib.ext_names[0]),
              qemu_name ?: "KVMguest", '\0');

    /* Insert UUID */
    memcpy(sysib.vm[0].uuid, &qemu_uuid, sizeof(sysib.vm[0].uuid));

    if (s390_is_pv()) {
        s390_cpu_pv_mem_write(cpu, 0, &sysib, sizeof(sysib));
    } else {
        s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, sizeof(sysib));
    }
}

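/* Post-process STSI data; only SYSIB 3.2.2 (fc 3, sel1/sel2 == 2) needs it. */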
static int handle_stsi(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    switch (run->s390_stsi.fc) {
    case 3:
        if (run->s390_stsi.sel1 != 2 || run->s390_stsi.sel2 != 2) {
            return 0;
        }
        /* Only sysib 3.2.2 needs post-handling for now. */
        insert_stsi_3_2_2(cpu, run->s390_stsi.addr, run->s390_stsi.ar);
        return 0;
    default:
        return 0;
    }
}

static int kvm_arch_handle_debug_exit(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    int ret = 0;
    struct kvm_debug_exit_arch *arch_info = &run->debug.arch;

    switch (arch_info->type) {
    case KVM_HW_WP_WRITE:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            cs->watchpoint_hit = &hw_watchpoint;
            hw_watchpoint.vaddr = arch_info->addr;
            hw_watchpoint.flags = BP_MEM_WRITE;
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_HW_BP:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_SINGLESTEP:
        if (cs->singlestep_enabled) {
            ret = EXCP_DEBUG;
        }
        break;
    default:
        ret = -ENOSYS;
    }

    return ret;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    S390CPU *cpu = S390_CPU(cs);
    int ret = 0;

    qemu_mutex_lock_iothread();

    kvm_cpu_synchronize_state(cs);

    switch (run->exit_reason) {
    case KVM_EXIT_S390_SIEIC:
        ret = handle_intercept(cpu);
        break;
    case KVM_EXIT_S390_RESET:
        s390_ipl_reset_request(cs, S390_RESET_REIPL);
        break;
    case KVM_EXIT_S390_TSCH:
        ret = handle_tsch(cpu);
        break;
    case KVM_EXIT_S390_STSI:
        ret = handle_stsi(cpu);
        break;
    case KVM_EXIT_DEBUG:
        ret = kvm_arch_handle_debug_exit(cpu);
        break;
    default:
        fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
        break;
    }
    qemu_mutex_unlock_iothread();

    if (ret == 0) {
        ret = EXCP_INTERRUPT;
    }
    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
{
    return true;
}

void kvm_s390_enable_css_support(S390CPU *cpu)
{
    int r;

    /* Activate host kernel channel subsystem support. */
    r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0);
    assert(r == 0);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
    /*
     * Note that while irqchip capabilities generally imply that cpustates
     * are handled in-kernel, it is not true for s390 (yet); therefore, we
     * have to override the common code kvm_halt_in_kernel_allowed setting.
     */
    if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        kvm_gsi_routing_allowed = true;
        kvm_halt_in_kernel_allowed = false;
    }
}

int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
                                    int vq, bool assign)
{
    struct kvm_ioeventfd kick = {
        .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY |
                 KVM_IOEVENTFD_FLAG_DATAMATCH,
        .fd = event_notifier_get_fd(notifier),
        .datamatch = vq,
        .addr = sch,
        .len = 8,
    };
    trace_kvm_assign_subch_ioeventfd(kick.fd, kick.addr, assign,
                                     kick.datamatch);
    if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
}

int kvm_s390_get_ri(void)
{
    return cap_ri;
}

int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
{
    struct kvm_mp_state mp_state = {};
    int ret;

    /* the kvm part might not have been initialized yet */
    if (CPU(cpu)->kvm_state == NULL) {
        return 0;
    }

    switch (cpu_state) {
    case S390_CPU_STATE_STOPPED:
        mp_state.mp_state = KVM_MP_STATE_STOPPED;
        break;
    case S390_CPU_STATE_CHECK_STOP:
        mp_state.mp_state = KVM_MP_STATE_CHECK_STOP;
        break;
    case S390_CPU_STATE_OPERATING:
        mp_state.mp_state = KVM_MP_STATE_OPERATING;
        break;
    case S390_CPU_STATE_LOAD:
        mp_state.mp_state = KVM_MP_STATE_LOAD;
        break;
    default:
        error_report("Requested CPU state is not a valid S390 CPU state: %u",
                     cpu_state);
        exit(1);
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
    if (ret) {
        trace_kvm_failed_cpu_state_set(CPU(cpu)->cpu_index, cpu_state,
                                       strerror(-ret));
    }

    return ret;
}

void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu)
{
    unsigned int max_cpus = MACHINE(qdev_get_machine())->smp.max_cpus;
    struct kvm_s390_irq_state irq_state = {
        .buf = (uint64_t) cpu->irqstate,
        .len = VCPU_IRQ_BUF_SIZE(max_cpus),
    };
    CPUState *cs = CPU(cpu);
    int32_t bytes;

    if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
        return;
    }

    bytes = kvm_vcpu_ioctl(cs, KVM_S390_GET_IRQ_STATE, &irq_state);
    if (bytes < 0) {
        cpu->irqstate_saved_size = 0;
        error_report("Migration of interrupt state failed");
        return;
    }

    cpu->irqstate_saved_size = bytes;
}

int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_s390_irq_state irq_state = {
        .buf = (uint64_t) cpu->irqstate,
        .len = cpu->irqstate_saved_size,
    };
    int r;

    if (cpu->irqstate_saved_size == 0) {
        return 0;
    }

    if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
        return -ENOSYS;
    }

    r = kvm_vcpu_ioctl(cs, KVM_S390_SET_IRQ_STATE, &irq_state);
    if (r) {
        error_report("Setting interrupt state failed %d", r);
    }
    return r;
}

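/*
 * Map a PCI MSI route onto the zPCI adapter interruption format; the MSI
 * vector number selects the bit offset within the indicator area.
 */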
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    S390PCIBusDevice *pbdev;
    uint32_t vec = data & ZPCI_MSI_VEC_MASK;

    if (!dev) {
        DPRINTF("add_msi_route no pci device\n");
        return -ENODEV;
    }

    pbdev = s390_pci_find_dev_by_target(s390_get_phb(), DEVICE(dev)->id);
    if (!pbdev) {
        DPRINTF("add_msi_route no zpci device\n");
        return -ENODEV;
    }

    route->type = KVM_IRQ_ROUTING_S390_ADAPTER;
    route->flags = 0;
    route->u.adapter.summary_addr = pbdev->routes.adapter.summary_addr;
    route->u.adapter.ind_addr = pbdev->routes.adapter.ind_addr;
    route->u.adapter.summary_offset = pbdev->routes.adapter.summary_offset;
    route->u.adapter.ind_offset = pbdev->routes.adapter.ind_offset + vec;
    route->u.adapter.adapter_id = pbdev->routes.adapter.adapter_id;
    return 0;
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}

static int query_cpu_subfunc(S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_subfunc prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE_SUBFUNC,
        .addr = (uint64_t) &prop,
    };
    int rc;

    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        return rc;
    }

    /*
     * Add all subfunctions now, but only if the corresponding feature
     * that unlocks the query function is available.
     */
    s390_add_from_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
    if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
    }
    if (test_bit(S390_FEAT_MSA, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
    }
    if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
    }
    if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
    }
    if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
    }
    if (test_bit(S390_FEAT_MSA_EXT_8, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma);
    }
    if (test_bit(S390_FEAT_MSA_EXT_9, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KDSA, prop.kdsa);
    }
    if (test_bit(S390_FEAT_ESORT_BASE, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_SORTL, prop.sortl);
    }
    if (test_bit(S390_FEAT_DEFLATE_BASE, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_DFLTCC, prop.dfltcc);
    }
    return 0;
}
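
/*
 * Mirror image of query_cpu_subfunc(): fill the subfunction blocks from the
 * requested feature bitmap and hand them to KVM, so the guest sees exactly
 * the configured query/test-bit results.
 */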
static int configure_cpu_subfunc(const S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_subfunc prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR_SUBFUNC,
        .addr = (uint64_t) &prop,
    };

    if (!kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                           KVM_S390_VM_CPU_PROCESSOR_SUBFUNC)) {
        /* hardware support might be missing; IBC will handle most of this */
        return 0;
    }

    s390_fill_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
    if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
    }
    if (test_bit(S390_FEAT_MSA, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
    }
    if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
    }
    if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
        s390_fill_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
    }
    if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
    }
    if (test_bit(S390_FEAT_MSA_EXT_8, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma);
    }
    if (test_bit(S390_FEAT_MSA_EXT_9, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KDSA, prop.kdsa);
    }
    if (test_bit(S390_FEAT_ESORT_BASE, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_SORTL, prop.sortl);
    }
    if (test_bit(S390_FEAT_DEFLATE_BASE, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_DFLTCC, prop.dfltcc);
    }
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

static int kvm_to_feat[][2] = {
    { KVM_S390_VM_CPU_FEAT_ESOP, S390_FEAT_ESOP },
    { KVM_S390_VM_CPU_FEAT_SIEF2, S390_FEAT_SIE_F2 },
    { KVM_S390_VM_CPU_FEAT_64BSCAO, S390_FEAT_SIE_64BSCAO },
    { KVM_S390_VM_CPU_FEAT_SIIF, S390_FEAT_SIE_SIIF },
    { KVM_S390_VM_CPU_FEAT_GPERE, S390_FEAT_SIE_GPERE },
    { KVM_S390_VM_CPU_FEAT_GSLS, S390_FEAT_SIE_GSLS },
    { KVM_S390_VM_CPU_FEAT_IB, S390_FEAT_SIE_IB },
    { KVM_S390_VM_CPU_FEAT_CEI, S390_FEAT_SIE_CEI },
    { KVM_S390_VM_CPU_FEAT_IBS, S390_FEAT_SIE_IBS },
    { KVM_S390_VM_CPU_FEAT_SKEY, S390_FEAT_SIE_SKEY },
    { KVM_S390_VM_CPU_FEAT_CMMA, S390_FEAT_SIE_CMMA },
    { KVM_S390_VM_CPU_FEAT_PFMFI, S390_FEAT_SIE_PFMFI },
    { KVM_S390_VM_CPU_FEAT_SIGPIF, S390_FEAT_SIE_SIGPIF },
    { KVM_S390_VM_CPU_FEAT_KSS, S390_FEAT_SIE_KSS },
};

static int query_cpu_feat(S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_feat prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE_FEAT,
        .addr = (uint64_t) &prop,
    };
    int rc;
    int i;

    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        return rc;
    }

    for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
        if (test_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat)) {
            set_bit(kvm_to_feat[i][1], features);
        }
    }
    return 0;
}
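
/*
 * Counterpart of query_cpu_feat(): convert the requested feature bits back
 * into KVM's big-endian feature block and set them as the processor
 * features of the VM.
 */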
static int configure_cpu_feat(const S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_feat prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR_FEAT,
        .addr = (uint64_t) &prop,
    };
    int i;

    for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
        if (test_bit(kvm_to_feat[i][1], features)) {
            set_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat);
        }
    }
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

bool kvm_s390_cpu_models_supported(void)
{
    if (!cpu_model_allowed()) {
        /* compatibility machines interfere with the cpu model */
        return false;
    }
    return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_MACHINE) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_PROCESSOR) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_MACHINE_FEAT) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_PROCESSOR_FEAT) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_MACHINE_SUBFUNC);
}
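
/*
 * Derive the host CPU model: query cpuid/IBC and the facility mask from
 * KVM, merge in the features and subfunctions reported by the kernel,
 * apply known quirks (PTFF without Multiple-epoch, CMM/CMM_NT, BPB), and
 * finally match the result against the known CPU definitions.
 */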
void kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp)
{
    struct kvm_s390_vm_cpu_machine prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE,
        .addr = (uint64_t) &prop,
    };
    uint16_t unblocked_ibc = 0, cpu_type = 0;
    int rc;

    memset(model, 0, sizeof(*model));

    if (!kvm_s390_cpu_models_supported()) {
        error_setg(errp, "KVM doesn't support CPU models");
        return;
    }

    /* query the basic cpu model properties */
    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        error_setg(errp, "KVM: Error querying host CPU model: %d", rc);
        return;
    }

    cpu_type = cpuid_type(prop.cpuid);
    if (has_ibc(prop.ibc)) {
        model->lowest_ibc = lowest_ibc(prop.ibc);
        unblocked_ibc = unblocked_ibc(prop.ibc);
    }
    model->cpu_id = cpuid_id(prop.cpuid);
    model->cpu_id_format = cpuid_format(prop.cpuid);
    model->cpu_ver = 0xff;

    /* get supported cpu features indicated via STFL(E) */
    s390_add_from_feat_block(model->features, S390_FEAT_TYPE_STFL,
                             (uint8_t *) prop.fac_mask);
    /* dat-enhancement facility 2 has no bit but was introduced with stfle */
    if (test_bit(S390_FEAT_STFLE, model->features)) {
        set_bit(S390_FEAT_DAT_ENH_2, model->features);
    }
    /* get supported cpu features indicated e.g. via SCLP */
    rc = query_cpu_feat(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error querying CPU features: %d", rc);
        return;
    }
    /* get supported cpu subfunctions indicated via query / test bit */
    rc = query_cpu_subfunc(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error querying CPU subfunctions: %d", rc);
        return;
    }

    /* PTFF subfunctions may be indicated despite missing kernel support */
    if (!test_bit(S390_FEAT_MULTIPLE_EPOCH, model->features)) {
        clear_bit(S390_FEAT_PTFF_QSIE, model->features);
        clear_bit(S390_FEAT_PTFF_QTOUE, model->features);
        clear_bit(S390_FEAT_PTFF_STOE, model->features);
        clear_bit(S390_FEAT_PTFF_STOUE, model->features);
    }

    /* with cpu model support, CMM is only indicated if really available */
    if (kvm_s390_cmma_available()) {
        set_bit(S390_FEAT_CMM, model->features);
    } else {
        /* no cmm -> no cmm nt */
        clear_bit(S390_FEAT_CMM_NT, model->features);
    }

    /* bpb needs kernel support for migration, VSIE and reset */
    if (!kvm_check_extension(kvm_state, KVM_CAP_S390_BPB)) {
        clear_bit(S390_FEAT_BPB, model->features);
    }

    /*
     * If we have support for protected virtualization, indicate
     * the protected virtualization IPL unpack facility.
     */
    if (cap_protected) {
        set_bit(S390_FEAT_UNPACK, model->features);
    }

    /* We emulate a zPCI bus and AEN, therefore we don't need HW support */
    set_bit(S390_FEAT_ZPCI, model->features);
    set_bit(S390_FEAT_ADAPTER_EVENT_NOTIFICATION, model->features);

    if (s390_known_cpu_type(cpu_type)) {
        /* we want the exact model, even if some features are missing */
        model->def = s390_find_cpu_def(cpu_type, ibc_gen(unblocked_ibc),
                                       ibc_ec_ga(unblocked_ibc), NULL);
    } else {
        /* model unknown, e.g. too new - search using features */
        model->def = s390_find_cpu_def(0, ibc_gen(unblocked_ibc),
                                       ibc_ec_ga(unblocked_ibc),
                                       model->features);
    }
    if (!model->def) {
        error_setg(errp, "KVM: host CPU model could not be identified");
        return;
    }
    /* for now, we can only provide the AP feature with HW support */
    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO,
                          KVM_S390_VM_CRYPTO_ENABLE_APIE)) {
        set_bit(S390_FEAT_AP, model->features);
    }

    /*
     * Extended-Length SCCB is handled entirely within QEMU.
     * For PV guests this is completely fenced by the Ultravisor, as Service
     * Call error checking and STFLE interpretation are handled via SIE.
     */
    set_bit(S390_FEAT_EXTENDED_LENGTH_SCCB, model->features);

    if (kvm_check_extension(kvm_state, KVM_CAP_S390_DIAG318)) {
        set_bit(S390_FEAT_DIAG_318, model->features);
    }

    /* strip off features that are not part of the maximum model */
    bitmap_and(model->features, model->features, model->def->full_feat,
               S390_FEAT_MAX);
}
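
/*
 * Toggle interpretive execution of AP (adjunct processor) instructions;
 * only attempted when the kernel advertises the corresponding CRYPTO
 * attribute.
 */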
static void kvm_s390_configure_apie(bool interpret)
{
    uint64_t attr = interpret ? KVM_S390_VM_CRYPTO_ENABLE_APIE :
                                KVM_S390_VM_CRYPTO_DISABLE_APIE;

    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
        kvm_s390_set_attr(attr);
    }
}

void kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp)
{
    struct kvm_s390_vm_cpu_processor prop = {
        .fac_list = { 0 },
    };
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR,
        .addr = (uint64_t) &prop,
    };
    int rc;

    if (!model) {
        /* compatibility handling if cpu models are disabled */
        if (kvm_s390_cmma_available()) {
            kvm_s390_enable_cmma();
        }
        return;
    }
    if (!kvm_s390_cpu_models_supported()) {
        error_setg(errp, "KVM doesn't support CPU models");
        return;
    }
    prop.cpuid = s390_cpuid_from_cpu_model(model);
    prop.ibc = s390_ibc_from_cpu_model(model);
    /* configure cpu features indicated via STFL(E) */
    s390_fill_feat_block(model->features, S390_FEAT_TYPE_STFL,
                         (uint8_t *) prop.fac_list);
    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    if (rc) {
        error_setg(errp, "KVM: Error configuring the CPU model: %d", rc);
        return;
    }
    /* configure cpu features indicated e.g. via SCLP */
    rc = configure_cpu_feat(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error configuring CPU features: %d", rc);
        return;
    }
    /* configure cpu subfunctions indicated via query / test bit */
    rc = configure_cpu_subfunc(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error configuring CPU subfunctions: %d", rc);
        return;
    }
    /* enable CMM via CMMA */
    if (test_bit(S390_FEAT_CMM, model->features)) {
        kvm_s390_enable_cmma();
    }

    if (test_bit(S390_FEAT_AP, model->features)) {
        kvm_s390_configure_apie(true);
    }
}

void kvm_s390_restart_interrupt(S390CPU *cpu)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_RESTART,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}

void kvm_s390_stop_interrupt(S390CPU *cpu)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_SIGP_STOP,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}

bool kvm_arch_cpu_check_are_resettable(void)
{
    return true;
}

int kvm_s390_get_zpci_op(void)
{
    return cap_zpci_op;
}

void kvm_arch_accel_class_init(ObjectClass *oc)
{
}