1 #include "qemu/osdep.h" 2 #include "qapi/error.h" 3 #include "sysemu/hw_accel.h" 4 #include "sysemu/sysemu.h" 5 #include "qemu/log.h" 6 #include "cpu.h" 7 #include "exec/exec-all.h" 8 #include "helper_regs.h" 9 #include "hw/ppc/spapr.h" 10 #include "mmu-hash64.h" 11 #include "cpu-models.h" 12 #include "trace.h" 13 #include "kvm_ppc.h" 14 #include "hw/ppc/spapr_ovec.h" 15 16 struct SPRSyncState { 17 int spr; 18 target_ulong value; 19 target_ulong mask; 20 }; 21 22 static void do_spr_sync(CPUState *cs, run_on_cpu_data arg) 23 { 24 struct SPRSyncState *s = arg.host_ptr; 25 PowerPCCPU *cpu = POWERPC_CPU(cs); 26 CPUPPCState *env = &cpu->env; 27 28 cpu_synchronize_state(cs); 29 env->spr[s->spr] &= ~s->mask; 30 env->spr[s->spr] |= s->value; 31 } 32 33 static void set_spr(CPUState *cs, int spr, target_ulong value, 34 target_ulong mask) 35 { 36 struct SPRSyncState s = { 37 .spr = spr, 38 .value = value, 39 .mask = mask 40 }; 41 run_on_cpu(cs, do_spr_sync, RUN_ON_CPU_HOST_PTR(&s)); 42 } 43 44 static bool has_spr(PowerPCCPU *cpu, int spr) 45 { 46 /* We can test whether the SPR is defined by checking for a valid name */ 47 return cpu->env.spr_cb[spr].name != NULL; 48 } 49 50 static inline bool valid_ptex(PowerPCCPU *cpu, target_ulong ptex) 51 { 52 /* 53 * hash value/pteg group index is normalized by HPT mask 54 */ 55 if (((ptex & ~7ULL) / HPTES_PER_GROUP) & ~ppc_hash64_hpt_mask(cpu)) { 56 return false; 57 } 58 return true; 59 } 60 61 static bool is_ram_address(sPAPRMachineState *spapr, hwaddr addr) 62 { 63 MachineState *machine = MACHINE(spapr); 64 MemoryHotplugState *hpms = &spapr->hotplug_memory; 65 66 if (addr < machine->ram_size) { 67 return true; 68 } 69 if ((addr >= hpms->base) 70 && ((addr - hpms->base) < memory_region_size(&hpms->mr))) { 71 return true; 72 } 73 74 return false; 75 } 76 77 static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr, 78 target_ulong opcode, target_ulong *args) 79 { 80 target_ulong flags = args[0]; 81 target_ulong ptex = 
args[1]; 82 target_ulong pteh = args[2]; 83 target_ulong ptel = args[3]; 84 unsigned apshift; 85 target_ulong raddr; 86 target_ulong slot; 87 const ppc_hash_pte64_t *hptes; 88 89 apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel); 90 if (!apshift) { 91 /* Bad page size encoding */ 92 return H_PARAMETER; 93 } 94 95 raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1); 96 97 if (is_ram_address(spapr, raddr)) { 98 /* Regular RAM - should have WIMG=0010 */ 99 if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) { 100 return H_PARAMETER; 101 } 102 } else { 103 target_ulong wimg_flags; 104 /* Looks like an IO address */ 105 /* FIXME: What WIMG combinations could be sensible for IO? 106 * For now we allow WIMG=010x, but are there others? */ 107 /* FIXME: Should we check against registered IO addresses? */ 108 wimg_flags = (ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M)); 109 110 if (wimg_flags != HPTE64_R_I && 111 wimg_flags != (HPTE64_R_I | HPTE64_R_M)) { 112 return H_PARAMETER; 113 } 114 } 115 116 pteh &= ~0x60ULL; 117 118 if (!valid_ptex(cpu, ptex)) { 119 return H_PARAMETER; 120 } 121 122 slot = ptex & 7ULL; 123 ptex = ptex & ~7ULL; 124 125 if (likely((flags & H_EXACT) == 0)) { 126 hptes = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP); 127 for (slot = 0; slot < 8; slot++) { 128 if (!(ppc_hash64_hpte0(cpu, hptes, slot) & HPTE64_V_VALID)) { 129 break; 130 } 131 } 132 ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP); 133 if (slot == 8) { 134 return H_PTEG_FULL; 135 } 136 } else { 137 hptes = ppc_hash64_map_hptes(cpu, ptex + slot, 1); 138 if (ppc_hash64_hpte0(cpu, hptes, 0) & HPTE64_V_VALID) { 139 ppc_hash64_unmap_hptes(cpu, hptes, ptex + slot, 1); 140 return H_PTEG_FULL; 141 } 142 ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1); 143 } 144 145 ppc_hash64_store_hpte(cpu, ptex + slot, pteh | HPTE64_V_HPTE_DIRTY, ptel); 146 147 args[0] = ptex + slot; 148 return H_SUCCESS; 149 } 150 151 typedef enum { 152 REMOVE_SUCCESS = 0, 153 REMOVE_NOT_FOUND = 1, 154 REMOVE_PARM = 
/*
 * Core of H_REMOVE / H_BULK_REMOVE: invalidate the HPTE at @ptex if it
 * matches the conditions selected by @flags (H_AVPN compares against
 * @avpn with the low 7 bits of the first doubleword masked off;
 * H_ANDCOND requires (hpte0 & avpn) == 0).  On success, the previous
 * first/second doublewords are returned through @vp/@rp and the TLB
 * entry for the HPTE is flushed.
 */
static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
                                target_ulong avpn,
                                target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    const ppc_hash_pte64_t *hptes;
    target_ulong v, r;

    if (!valid_ptex(cpu, ptex)) {
        return REMOVE_PARM;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
    v = ppc_hash64_hpte0(cpu, hptes, 0);
    r = ppc_hash64_hpte1(cpu, hptes, 0);
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        return REMOVE_NOT_FOUND;
    }
    *vp = v;
    *rp = r;
    /* Clear the entry (leaving only the dirty marker), then flush it */
    ppc_hash64_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    return REMOVE_SUCCESS;
}

/*
 * H_REMOVE hypercall: remove a single HPTE.
 * args[0]=flags, args[1]=ptex, args[2]=avpn.
 * On success the old doublewords are returned in args[0]/args[1].
 */
static target_ulong h_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong avpn = args[2];
    RemoveResult ret;

    ret = remove_hpte(cpu, ptex, avpn, flags,
                      &args[0], &args[1]);

    switch (ret) {
    case REMOVE_SUCCESS:
        check_tlb_flush(env, true);
        return H_SUCCESS;

    case REMOVE_NOT_FOUND:
        return H_NOT_FOUND;

    case REMOVE_PARM:
        return H_PARAMETER;

    case REMOVE_HW:
        return H_HARDWARE;
    }

    g_assert_not_reached();
}

/*
 * Field layout of each translation-specifier doubleword (tsh) handed to
 * H_BULK_REMOVE: type in the top two bits, result code, flags, and the
 * HPTE index in the low bits.
 */
#define H_BULK_REMOVE_TYPE             0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST        0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE       0x8000000000000000ULL
#define   H_BULK_REMOVE_END            0xc000000000000000ULL
#define H_BULK_REMOVE_CODE             0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS        0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND      0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM           0x2000000000000000ULL
#define   H_BULK_REMOVE_HW             0x3000000000000000ULL
#define H_BULK_REMOVE_RC               0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS            0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE       0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND        0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN           0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX             0x00ffffffffffffffULL

#define H_BULK_REMOVE_MAX_BATCH        4

/*
 * H_BULK_REMOVE hypercall: remove up to 4 HPTEs in one call.  Each pair
 * of args is (translation specifier, avpn); per-entry results are written
 * back into the specifier doubleword in place.
 */
static target_ulong h_bulk_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    int i;
    target_ulong rc = H_SUCCESS;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i*2];
        target_ulong tsl = args[i*2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            break; /* end-of-list marker */
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        /* Turn the request entry into a response entry */
        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        /* AVPN and ANDCOND are mutually exclusive */
        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

        /* >> 26 shifts the bulk flag bits down to the positions that
         * remove_hpte() expects (H_AVPN / H_ANDCOND) */
        ret = remove_hpte(cpu, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        *tsh |= ret << 60; /* per-entry result code */

        switch (ret) {
        case REMOVE_SUCCESS:
            /* Report the removed HPTE's R and C bits in the response */
            *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
            break;

        case REMOVE_PARM:
            rc = H_PARAMETER;
            goto exit;

        case REMOVE_HW:
            rc = H_HARDWARE;
            goto exit;
        }
    }
 exit:
    check_tlb_flush(env, true);

    return rc;
}

/*
 * H_PROTECT hypercall: update the protection/key bits of an existing
 * HPTE.  args[0]=flags (new pp/n/key bits plus optional H_AVPN match),
 * args[1]=ptex, args[2]=avpn.
 */
static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong avpn = args[2];
    const ppc_hash_pte64_t *hptes;
    target_ulong v, r;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
    v = ppc_hash64_hpte0(cpu, hptes, 0);
    r = ppc_hash64_hpte1(cpu, hptes, 0);
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        return H_NOT_FOUND;
    }

    /* Rebuild the protection-related fields of hpte1 from @flags */
    r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
           HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
    r |= (flags << 55) & HPTE64_R_PP0;
    r |= (flags << 48) & HPTE64_R_KEY_HI;
    r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
    /* Invalidate first, flush, then re-store with the new bits so no
     * stale translation can be observed in between */
    ppc_hash64_store_hpte(cpu, ptex,
                          (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    /* Flush the tlb */
    check_tlb_flush(env, true);
    /* Don't need a memory barrier, due to qemu's global lock */
    ppc_hash64_store_hpte(cpu, ptex, v | HPTE64_V_HPTE_DIRTY, r);
    return H_SUCCESS;
}

/*
 * H_READ hypercall: read back 1 HPTE (or 4 with H_READ_4) starting at
 * args[1], returning the doublewords through args[0..].
 */
static target_ulong h_read(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    uint8_t *hpte;
    int i, ridx, n_entries = 1;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    if (flags & H_READ_4) {
        /* Clear the two low order bits */
        ptex &= ~(3ULL);
        n_entries = 4;
    }

    hpte = spapr->htab + (ptex * HASH_PTE_SIZE_64);

    for (i = 0, ridx = 0; i < n_entries; i++) {
        args[ridx++] = ldq_p(hpte);
        args[ridx++] = ldq_p(hpte + (HASH_PTE_SIZE_64/2));
        hpte += HASH_PTE_SIZE_64;
    }

    return H_SUCCESS;
}

/* H_SET_SPRG0 hypercall: set SPRG0 on the calling vCPU */
static target_ulong h_set_sprg0(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    cpu_synchronize_state(CPU(cpu));
    cpu->env.spr[SPR_SPRG0] = args[0];

    return H_SUCCESS;
}

/*
 * H_SET_DABR hypercall: set the data address breakpoint register.
 * args[0] = new DABR value.
 */
static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    if (!has_spr(cpu, SPR_DABR)) {
        return H_HARDWARE;              /* DABR register not available */
    }
    cpu_synchronize_state(CPU(cpu));

    if (has_spr(cpu, SPR_DABRX)) {
        cpu->env.spr[SPR_DABRX] = 0x3;  /* Use Problem and Privileged state */
    } else if (!(args[0] & 0x4)) {      /* Breakpoint Translation set? */
        return H_RESERVED_DABR;
    }

    cpu->env.spr[SPR_DABR] = args[0];
    return H_SUCCESS;
}
(!has_spr(cpu, SPR_DABR)) { 368 return H_HARDWARE; /* DABR register not available */ 369 } 370 cpu_synchronize_state(CPU(cpu)); 371 372 if (has_spr(cpu, SPR_DABRX)) { 373 cpu->env.spr[SPR_DABRX] = 0x3; /* Use Problem and Privileged state */ 374 } else if (!(args[0] & 0x4)) { /* Breakpoint Translation set? */ 375 return H_RESERVED_DABR; 376 } 377 378 cpu->env.spr[SPR_DABR] = args[0]; 379 return H_SUCCESS; 380 } 381 382 static target_ulong h_set_xdabr(PowerPCCPU *cpu, sPAPRMachineState *spapr, 383 target_ulong opcode, target_ulong *args) 384 { 385 target_ulong dabrx = args[1]; 386 387 if (!has_spr(cpu, SPR_DABR) || !has_spr(cpu, SPR_DABRX)) { 388 return H_HARDWARE; 389 } 390 391 if ((dabrx & ~0xfULL) != 0 || (dabrx & H_DABRX_HYPERVISOR) != 0 392 || (dabrx & (H_DABRX_KERNEL | H_DABRX_USER)) == 0) { 393 return H_PARAMETER; 394 } 395 396 cpu_synchronize_state(CPU(cpu)); 397 cpu->env.spr[SPR_DABRX] = dabrx; 398 cpu->env.spr[SPR_DABR] = args[0]; 399 400 return H_SUCCESS; 401 } 402 403 static target_ulong h_page_init(PowerPCCPU *cpu, sPAPRMachineState *spapr, 404 target_ulong opcode, target_ulong *args) 405 { 406 target_ulong flags = args[0]; 407 hwaddr dst = args[1]; 408 hwaddr src = args[2]; 409 hwaddr len = TARGET_PAGE_SIZE; 410 uint8_t *pdst, *psrc; 411 target_long ret = H_SUCCESS; 412 413 if (flags & ~(H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE 414 | H_COPY_PAGE | H_ZERO_PAGE)) { 415 qemu_log_mask(LOG_UNIMP, "h_page_init: Bad flags (" TARGET_FMT_lx "\n", 416 flags); 417 return H_PARAMETER; 418 } 419 420 /* Map-in destination */ 421 if (!is_ram_address(spapr, dst) || (dst & ~TARGET_PAGE_MASK) != 0) { 422 return H_PARAMETER; 423 } 424 pdst = cpu_physical_memory_map(dst, &len, 1); 425 if (!pdst || len != TARGET_PAGE_SIZE) { 426 return H_PARAMETER; 427 } 428 429 if (flags & H_COPY_PAGE) { 430 /* Map-in source, copy to destination, and unmap source again */ 431 if (!is_ram_address(spapr, src) || (src & ~TARGET_PAGE_MASK) != 0) { 432 ret = H_PARAMETER; 433 goto unmap_out; 
434 } 435 psrc = cpu_physical_memory_map(src, &len, 0); 436 if (!psrc || len != TARGET_PAGE_SIZE) { 437 ret = H_PARAMETER; 438 goto unmap_out; 439 } 440 memcpy(pdst, psrc, len); 441 cpu_physical_memory_unmap(psrc, len, 0, len); 442 } else if (flags & H_ZERO_PAGE) { 443 memset(pdst, 0, len); /* Just clear the destination page */ 444 } 445 446 if (kvm_enabled() && (flags & H_ICACHE_SYNCHRONIZE) != 0) { 447 kvmppc_dcbst_range(cpu, pdst, len); 448 } 449 if (flags & (H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE)) { 450 if (kvm_enabled()) { 451 kvmppc_icbi_range(cpu, pdst, len); 452 } else { 453 tb_flush(CPU(cpu)); 454 } 455 } 456 457 unmap_out: 458 cpu_physical_memory_unmap(pdst, TARGET_PAGE_SIZE, 1, len); 459 return ret; 460 } 461 462 #define FLAGS_REGISTER_VPA 0x0000200000000000ULL 463 #define FLAGS_REGISTER_DTL 0x0000400000000000ULL 464 #define FLAGS_REGISTER_SLBSHADOW 0x0000600000000000ULL 465 #define FLAGS_DEREGISTER_VPA 0x0000a00000000000ULL 466 #define FLAGS_DEREGISTER_DTL 0x0000c00000000000ULL 467 #define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL 468 469 #define VPA_MIN_SIZE 640 470 #define VPA_SIZE_OFFSET 0x4 471 #define VPA_SHARED_PROC_OFFSET 0x9 472 #define VPA_SHARED_PROC_VAL 0x2 473 474 static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa) 475 { 476 CPUState *cs = CPU(ppc_env_get_cpu(env)); 477 uint16_t size; 478 uint8_t tmp; 479 480 if (vpa == 0) { 481 hcall_dprintf("Can't cope with registering a VPA at logical 0\n"); 482 return H_HARDWARE; 483 } 484 485 if (vpa % env->dcache_line_size) { 486 return H_PARAMETER; 487 } 488 /* FIXME: bounds check the address */ 489 490 size = lduw_be_phys(cs->as, vpa + 0x4); 491 492 if (size < VPA_MIN_SIZE) { 493 return H_PARAMETER; 494 } 495 496 /* VPA is not allowed to cross a page boundary */ 497 if ((vpa / 4096) != ((vpa + size - 1) / 4096)) { 498 return H_PARAMETER; 499 } 500 501 env->vpa_addr = vpa; 502 503 tmp = ldub_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET); 504 tmp |= 
/*
 * Deregister the VPA.  Refused while the SLB shadow buffer or the
 * dispatch trace log are still registered, since both require a VPA.
 */
static target_ulong deregister_vpa(CPUPPCState *env, target_ulong vpa)
{
    if (env->slb_shadow_addr) {
        return H_RESOURCE;
    }

    if (env->dtl_addr) {
        return H_RESOURCE;
    }

    env->vpa_addr = 0;
    return H_SUCCESS;
}

/*
 * Register the SLB shadow buffer at @addr.  Its 32-bit size field lives
 * at offset 4; the buffer must not cross a page boundary and a VPA must
 * already be registered.
 */
static target_ulong register_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with SLB shadow at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);
    if (size < 0x8) {
        return H_PARAMETER;
    }

    if ((addr / 4096) != ((addr + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->slb_shadow_addr = addr;
    env->slb_shadow_size = size;

    return H_SUCCESS;
}

/* Deregister the SLB shadow buffer (always succeeds) */
static target_ulong deregister_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    env->slb_shadow_addr = 0;
    env->slb_shadow_size = 0;
    return H_SUCCESS;
}

/*
 * Register the dispatch trace log at @addr.  Size field at offset 4,
 * minimum 48 bytes; requires a registered VPA.
 */
static target_ulong register_dtl(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with DTL at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);

    if (size < 48) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->dtl_addr = addr;
    env->dtl_size = size;

    return H_SUCCESS;
}

/* Deregister the dispatch trace log (always succeeds) */
static target_ulong deregister_dtl(CPUPPCState *env, target_ulong addr)
{
    env->dtl_addr = 0;
    env->dtl_size = 0;

    return H_SUCCESS;
}

/*
 * H_REGISTER_VPA hypercall: (de)register VPA, SLB shadow or DTL for the
 * vCPU identified by args[1], with args[0] selecting the sub-operation
 * and args[2] the buffer address.
 */
static target_ulong h_register_vpa(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong procno = args[1];
    target_ulong vpa = args[2];
    target_ulong ret = H_PARAMETER;
    CPUPPCState *tenv;
    PowerPCCPU *tcpu;

    tcpu = ppc_get_vcpu_by_dt_id(procno);
    if (!tcpu) {
        return H_PARAMETER;
    }
    tenv = &tcpu->env;

    switch (flags) {
    case FLAGS_REGISTER_VPA:
        ret = register_vpa(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_VPA:
        ret = deregister_vpa(tenv, vpa);
        break;

    case FLAGS_REGISTER_SLBSHADOW:
        ret = register_slb_shadow(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_SLBSHADOW:
        ret = deregister_slb_shadow(tenv, vpa);
        break;

    case FLAGS_REGISTER_DTL:
        ret = register_dtl(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_DTL:
        ret = deregister_dtl(tenv, vpa);
        break;
    }

    return ret;
}

/*
 * H_CEDE hypercall: the vCPU yields.  Enable external interrupts in MSR
 * and, if there is no pending work, halt the vCPU until woken.
 */
static target_ulong h_cede(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    env->msr |= (1ULL << MSR_EE);
    hreg_compute_hflags(env);
    if (!cpu_has_work(cs)) {
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
        cs->exit_request = 1;
    }
    return H_SUCCESS;
}

/*
 * KVMPPC_H_RTAS hypercall: forward an RTAS call.  args[0] points to the
 * guest RTAS argument buffer: token, nargs, nret, then nargs input words
 * followed by nret return words.
 */
static target_ulong h_rtas(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong rtas_r3 = args[0];
    uint32_t token = rtas_ld(rtas_r3, 0);
    uint32_t nargs = rtas_ld(rtas_r3, 1);
    uint32_t nret = rtas_ld(rtas_r3, 2);

    return spapr_rtas_call(cpu, spapr, token, nargs, rtas_r3 + 12,
                           nret, rtas_r3 + 12 + 4*nargs);
}

/*
 * H_LOGICAL_CI_LOAD / H_LOGICAL_CACHE_LOAD: load args[0] bytes (1/2/4/8)
 * from physical address args[1], returning the value in args[0].
 */
static target_ulong h_logical_load(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong size = args[0];
    target_ulong addr = args[1];

    switch (size) {
    case 1:
        args[0] = ldub_phys(cs->as, addr);
        return H_SUCCESS;
    case 2:
        args[0] = lduw_phys(cs->as, addr);
        return H_SUCCESS;
    case 4:
        args[0] = ldl_phys(cs->as, addr);
        return H_SUCCESS;
    case 8:
        args[0] = ldq_phys(cs->as, addr);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}
/*
 * H_LOGICAL_CI_STORE / H_LOGICAL_CACHE_STORE: store args[2] as args[0]
 * bytes (1/2/4/8) to physical address args[1].
 */
static target_ulong h_logical_store(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong size = args[0];
    target_ulong addr = args[1];
    target_ulong val = args[2];

    switch (size) {
    case 1:
        stb_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 2:
        stw_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 4:
        stl_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 8:
        stq_phys(cs->as, addr, val);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

/*
 * KVMPPC_H_LOGICAL_MEMOP hypercall: element-wise copy (or bitwise-invert
 * copy) between two physical ranges.  When the ranges overlap with dst
 * above src, the copy runs backwards so elements are not clobbered
 * before they are read.
 */
static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong dst = args[0];   /* Destination address */
    target_ulong src = args[1];   /* Source address */
    target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
    target_ulong count = args[3]; /* Element count */
    target_ulong op = args[4];    /* 0 = copy, 1 = invert */
    uint64_t tmp;
    unsigned int mask = (1 << esize) - 1; /* alignment mask for one element */
    int step = 1 << esize;

    if (count > 0x80000000) {
        return H_PARAMETER;
    }

    /* Both addresses must be element-aligned; op must be 0 or 1 */
    if ((dst & mask) || (src & mask) || (op > 1)) {
        return H_PARAMETER;
    }

    if (dst >= src && dst < (src + (count << esize))) {
        /* Overlapping with dst inside the source range: copy backwards */
        dst = dst + ((count - 1) << esize);
        src = src + ((count - 1) << esize);
        step = -step;
    }

    while (count--) {
        switch (esize) {
        case 0:
            tmp = ldub_phys(cs->as, src);
            break;
        case 1:
            tmp = lduw_phys(cs->as, src);
            break;
        case 2:
            tmp = ldl_phys(cs->as, src);
            break;
        case 3:
            tmp = ldq_phys(cs->as, src);
            break;
        default:
            return H_PARAMETER;
        }
        if (op == 1) {
            tmp = ~tmp;
        }
        switch (esize) {
        case 0:
            stb_phys(cs->as, dst, tmp);
            break;
        case 1:
            stw_phys(cs->as, dst, tmp);
            break;
        case 2:
            stl_phys(cs->as, dst, tmp);
            break;
        case 3:
            stq_phys(cs->as, dst, tmp);
            break;
        }
        dst = dst + step;
        src = src + step;
    }

    return H_SUCCESS;
}

static target_ulong h_logical_icbi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

static target_ulong h_logical_dcbf(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

/*
 * H_SET_MODE resource "LE": switch interrupt endianness (LPCR_ILE) on
 * every vCPU and flip the VGA framebuffer endianness to match.
 */
static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu,
                                           target_ulong mflags,
                                           target_ulong value1,
                                           target_ulong value2)
{
    CPUState *cs;

    /* value1/value2 are reserved for this resource and must be zero */
    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    switch (mflags) {
    case H_SET_MODE_ENDIAN_BIG:
        CPU_FOREACH(cs) {
            set_spr(cs, SPR_LPCR, 0, LPCR_ILE);
        }
        spapr_pci_switch_vga(true);
        return H_SUCCESS;

    case H_SET_MODE_ENDIAN_LITTLE:
        CPU_FOREACH(cs) {
            set_spr(cs, SPR_LPCR, LPCR_ILE, LPCR_ILE);
        }
        spapr_pci_switch_vga(false);
        return H_SUCCESS;
    }

    return H_UNSUPPORTED_FLAG;
}

/*
 * H_SET_MODE resource "address translation mode": program the LPCR AIL
 * (alternate interrupt location) field on every vCPU.  Only valid on
 * ISA 2.07S-capable CPUs.
 */
static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu,
                                                        target_ulong mflags,
                                                        target_ulong value1,
                                                        target_ulong value2)
{
    CPUState *cs;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (!(pcc->insns_flags2 & PPC2_ISA207S)) {
        return H_P2;
    }
    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    if (mflags == AIL_RESERVED) {
        return H_UNSUPPORTED_FLAG;
    }

    CPU_FOREACH(cs) {
        set_spr(cs, SPR_LPCR, mflags << LPCR_AIL_SHIFT, LPCR_AIL);
    }

    return H_SUCCESS;
}
CPU_FOREACH(cs) { 856 set_spr(cs, SPR_LPCR, mflags << LPCR_AIL_SHIFT, LPCR_AIL); 857 } 858 859 return H_SUCCESS; 860 } 861 862 static target_ulong h_set_mode(PowerPCCPU *cpu, sPAPRMachineState *spapr, 863 target_ulong opcode, target_ulong *args) 864 { 865 target_ulong resource = args[1]; 866 target_ulong ret = H_P2; 867 868 switch (resource) { 869 case H_SET_MODE_RESOURCE_LE: 870 ret = h_set_mode_resource_le(cpu, args[0], args[2], args[3]); 871 break; 872 case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE: 873 ret = h_set_mode_resource_addr_trans_mode(cpu, args[0], 874 args[2], args[3]); 875 break; 876 } 877 878 return ret; 879 } 880 881 #define H_SIGNAL_SYS_RESET_ALL -1 882 #define H_SIGNAL_SYS_RESET_ALLBUTSELF -2 883 884 static target_ulong h_signal_sys_reset(PowerPCCPU *cpu, 885 sPAPRMachineState *spapr, 886 target_ulong opcode, target_ulong *args) 887 { 888 target_long target = args[0]; 889 CPUState *cs; 890 891 if (target < 0) { 892 /* Broadcast */ 893 if (target < H_SIGNAL_SYS_RESET_ALLBUTSELF) { 894 return H_PARAMETER; 895 } 896 897 CPU_FOREACH(cs) { 898 PowerPCCPU *c = POWERPC_CPU(cs); 899 900 if (target == H_SIGNAL_SYS_RESET_ALLBUTSELF) { 901 if (c == cpu) { 902 continue; 903 } 904 } 905 run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL); 906 } 907 return H_SUCCESS; 908 909 } else { 910 /* Unicast */ 911 CPU_FOREACH(cs) { 912 if (cpu->cpu_dt_id == target) { 913 run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL); 914 return H_SUCCESS; 915 } 916 } 917 return H_PARAMETER; 918 } 919 } 920 921 static target_ulong h_client_architecture_support(PowerPCCPU *cpu, 922 sPAPRMachineState *spapr, 923 target_ulong opcode, 924 target_ulong *args) 925 { 926 target_ulong list = ppc64_phys_to_real(args[0]); 927 target_ulong ov_table; 928 bool explicit_match = false; /* Matched the CPU's real PVR */ 929 uint32_t max_compat = cpu->max_compat; 930 uint32_t best_compat = 0; 931 int i; 932 sPAPROptionVector *ov5_guest, *ov5_cas_old, *ov5_updates; 933 934 /* 935 * We 
scan the supplied table of PVRs looking for two things 936 * 1. Is our real CPU PVR in the list? 937 * 2. What's the "best" listed logical PVR 938 */ 939 for (i = 0; i < 512; ++i) { 940 uint32_t pvr, pvr_mask; 941 942 pvr_mask = ldl_be_phys(&address_space_memory, list); 943 pvr = ldl_be_phys(&address_space_memory, list + 4); 944 list += 8; 945 946 if (~pvr_mask & pvr) { 947 break; /* Terminator record */ 948 } 949 950 if ((cpu->env.spr[SPR_PVR] & pvr_mask) == (pvr & pvr_mask)) { 951 explicit_match = true; 952 } else { 953 if (ppc_check_compat(cpu, pvr, best_compat, max_compat)) { 954 best_compat = pvr; 955 } 956 } 957 } 958 959 if ((best_compat == 0) && (!explicit_match || max_compat)) { 960 /* We couldn't find a suitable compatibility mode, and either 961 * the guest doesn't support "raw" mode for this CPU, or raw 962 * mode is disabled because a maximum compat mode is set */ 963 return H_HARDWARE; 964 } 965 966 /* Parsing finished */ 967 trace_spapr_cas_pvr(cpu->compat_pvr, explicit_match, best_compat); 968 969 /* Update CPUs */ 970 if (cpu->compat_pvr != best_compat) { 971 Error *local_err = NULL; 972 973 ppc_set_compat_all(best_compat, &local_err); 974 if (local_err) { 975 error_report_err(local_err); 976 return H_HARDWARE; 977 } 978 } 979 980 /* For the future use: here @ov_table points to the first option vector */ 981 ov_table = list; 982 983 ov5_guest = spapr_ovec_parse_vector(ov_table, 5); 984 985 /* NOTE: there are actually a number of ov5 bits where input from the 986 * guest is always zero, and the platform/QEMU enables them independently 987 * of guest input. To model these properly we'd want some sort of mask, 988 * but since they only currently apply to memory migration as defined 989 * by LoPAPR 1.1, 14.5.4.8, which QEMU doesn't implement, we don't need 990 * to worry about this for now. 
991 */ 992 ov5_cas_old = spapr_ovec_clone(spapr->ov5_cas); 993 /* full range of negotiated ov5 capabilities */ 994 spapr_ovec_intersect(spapr->ov5_cas, spapr->ov5, ov5_guest); 995 spapr_ovec_cleanup(ov5_guest); 996 /* capabilities that have been added since CAS-generated guest reset. 997 * if capabilities have since been removed, generate another reset 998 */ 999 ov5_updates = spapr_ovec_new(); 1000 spapr->cas_reboot = spapr_ovec_diff(ov5_updates, 1001 ov5_cas_old, spapr->ov5_cas); 1002 1003 if (!spapr->cas_reboot) { 1004 spapr->cas_reboot = 1005 (spapr_h_cas_compose_response(spapr, args[1], args[2], 1006 ov5_updates) != 0); 1007 } 1008 spapr_ovec_cleanup(ov5_updates); 1009 1010 if (spapr->cas_reboot) { 1011 qemu_system_reset_request(); 1012 } 1013 1014 return H_SUCCESS; 1015 } 1016 1017 static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1]; 1018 static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1]; 1019 1020 void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn) 1021 { 1022 spapr_hcall_fn *slot; 1023 1024 if (opcode <= MAX_HCALL_OPCODE) { 1025 assert((opcode & 0x3) == 0); 1026 1027 slot = &papr_hypercall_table[opcode / 4]; 1028 } else { 1029 assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX)); 1030 1031 slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE]; 1032 } 1033 1034 assert(!(*slot)); 1035 *slot = fn; 1036 } 1037 1038 target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode, 1039 target_ulong *args) 1040 { 1041 sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); 1042 1043 if ((opcode <= MAX_HCALL_OPCODE) 1044 && ((opcode & 0x3) == 0)) { 1045 spapr_hcall_fn fn = papr_hypercall_table[opcode / 4]; 1046 1047 if (fn) { 1048 return fn(cpu, spapr, opcode, args); 1049 } 1050 } else if ((opcode >= KVMPPC_HCALL_BASE) && 1051 (opcode <= KVMPPC_HCALL_MAX)) { 1052 spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE]; 1053 1054 if (fn) { 1055 
return fn(cpu, spapr, opcode, args); 1056 } 1057 } 1058 1059 qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x" TARGET_FMT_lx "\n", 1060 opcode); 1061 return H_FUNCTION; 1062 } 1063 1064 static void hypercall_register_types(void) 1065 { 1066 /* hcall-pft */ 1067 spapr_register_hypercall(H_ENTER, h_enter); 1068 spapr_register_hypercall(H_REMOVE, h_remove); 1069 spapr_register_hypercall(H_PROTECT, h_protect); 1070 spapr_register_hypercall(H_READ, h_read); 1071 1072 /* hcall-bulk */ 1073 spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove); 1074 1075 /* hcall-splpar */ 1076 spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa); 1077 spapr_register_hypercall(H_CEDE, h_cede); 1078 spapr_register_hypercall(H_SIGNAL_SYS_RESET, h_signal_sys_reset); 1079 1080 /* processor register resource access h-calls */ 1081 spapr_register_hypercall(H_SET_SPRG0, h_set_sprg0); 1082 spapr_register_hypercall(H_SET_DABR, h_set_dabr); 1083 spapr_register_hypercall(H_SET_XDABR, h_set_xdabr); 1084 spapr_register_hypercall(H_PAGE_INIT, h_page_init); 1085 spapr_register_hypercall(H_SET_MODE, h_set_mode); 1086 1087 /* "debugger" hcalls (also used by SLOF). Note: We do -not- differenciate 1088 * here between the "CI" and the "CACHE" variants, they will use whatever 1089 * mapping attributes qemu is using. 
When using KVM, the kernel will 1090 * enforce the attributes more strongly 1091 */ 1092 spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load); 1093 spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store); 1094 spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load); 1095 spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store); 1096 spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi); 1097 spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf); 1098 spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop); 1099 1100 /* qemu/KVM-PPC specific hcalls */ 1101 spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas); 1102 1103 /* ibm,client-architecture-support support */ 1104 spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support); 1105 } 1106 1107 type_init(hypercall_register_types) 1108