1 #include "qemu/osdep.h" 2 #include "qapi/error.h" 3 #include "sysemu/sysemu.h" 4 #include "qemu/log.h" 5 #include "cpu.h" 6 #include "exec/exec-all.h" 7 #include "helper_regs.h" 8 #include "hw/ppc/spapr.h" 9 #include "mmu-hash64.h" 10 #include "cpu-models.h" 11 #include "trace.h" 12 #include "sysemu/kvm.h" 13 #include "kvm_ppc.h" 14 15 struct SPRSyncState { 16 CPUState *cs; 17 int spr; 18 target_ulong value; 19 target_ulong mask; 20 }; 21 22 static void do_spr_sync(void *arg) 23 { 24 struct SPRSyncState *s = arg; 25 PowerPCCPU *cpu = POWERPC_CPU(s->cs); 26 CPUPPCState *env = &cpu->env; 27 28 cpu_synchronize_state(s->cs); 29 env->spr[s->spr] &= ~s->mask; 30 env->spr[s->spr] |= s->value; 31 } 32 33 static void set_spr(CPUState *cs, int spr, target_ulong value, 34 target_ulong mask) 35 { 36 struct SPRSyncState s = { 37 .cs = cs, 38 .spr = spr, 39 .value = value, 40 .mask = mask 41 }; 42 run_on_cpu(cs, do_spr_sync, &s); 43 } 44 45 static bool has_spr(PowerPCCPU *cpu, int spr) 46 { 47 /* We can test whether the SPR is defined by checking for a valid name */ 48 return cpu->env.spr_cb[spr].name != NULL; 49 } 50 51 static inline bool valid_pte_index(CPUPPCState *env, target_ulong pte_index) 52 { 53 /* 54 * hash value/pteg group index is normalized by htab_mask 55 */ 56 if (((pte_index & ~7ULL) / HPTES_PER_GROUP) & ~env->htab_mask) { 57 return false; 58 } 59 return true; 60 } 61 62 static bool is_ram_address(sPAPRMachineState *spapr, hwaddr addr) 63 { 64 MachineState *machine = MACHINE(spapr); 65 MemoryHotplugState *hpms = &spapr->hotplug_memory; 66 67 if (addr < machine->ram_size) { 68 return true; 69 } 70 if ((addr >= hpms->base) 71 && ((addr - hpms->base) < memory_region_size(&hpms->mr))) { 72 return true; 73 } 74 75 return false; 76 } 77 78 static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr, 79 target_ulong opcode, target_ulong *args) 80 { 81 CPUPPCState *env = &cpu->env; 82 target_ulong flags = args[0]; 83 target_ulong pte_index = args[1]; 84 target_ulong pteh = args[2]; 85 target_ulong ptel = args[3]; 86 unsigned apshift, spshift; 87 target_ulong raddr; 88 target_ulong index; 89 uint64_t token; 90 91 apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel, &spshift); 92 if (!apshift) { 93 /* Bad page size encoding */ 94 return H_PARAMETER; 95 } 96 97 raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1); 98 99 if (is_ram_address(spapr, raddr)) { 100 /* Regular RAM - should have WIMG=0010 */ 101 if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) { 102 return H_PARAMETER; 103 } 104 } else { 105 /* Looks like an IO address */ 106 /* FIXME: What WIMG combinations could be sensible for IO? 107 * For now we allow WIMG=010x, but are there others? */ 108 /* FIXME: Should we check against registered IO addresses? 
static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    unsigned apshift, spshift;
    target_ulong raddr;
    target_ulong index;
    uint64_t token;

    apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel, &spshift);
    if (!apshift) {
        /* Bad page size encoding */
        return H_PARAMETER;
    }

    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1);

    if (is_ram_address(spapr, raddr)) {
        /* Regular RAM - should have WIMG=0010 */
        if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
            return H_PARAMETER;
        }
    } else {
        /* Looks like an IO address */
        /* FIXME: What WIMG combinations could be sensible for IO?
         * For now we allow WIMG=010x, but are there others? */
        /* FIXME: Should we check against registered IO addresses? */
        if ((ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M)) != HPTE64_R_I) {
            return H_PARAMETER;
        }
    }

    pteh &= ~0x60ULL;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    index = 0;
    if (likely((flags & H_EXACT) == 0)) {
        pte_index &= ~7ULL;
        token = ppc_hash64_start_access(cpu, pte_index);
        for (; index < 8; index++) {
            if (!(ppc_hash64_load_hpte0(cpu, token, index) & HPTE64_V_VALID)) {
                break;
            }
        }
        ppc_hash64_stop_access(cpu, token);
        if (index == 8) {
            return H_PTEG_FULL;
        }
    } else {
        token = ppc_hash64_start_access(cpu, pte_index);
        if (ppc_hash64_load_hpte0(cpu, token, 0) & HPTE64_V_VALID) {
            ppc_hash64_stop_access(cpu, token);
            return H_PTEG_FULL;
        }
        ppc_hash64_stop_access(cpu, token);
    }

    ppc_hash64_store_hpte(cpu, pte_index + index,
                          pteh | HPTE64_V_HPTE_DIRTY, ptel);

    args[0] = pte_index + index;
    return H_SUCCESS;
}

typedef enum {
    REMOVE_SUCCESS = 0,
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
} RemoveResult;

static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
                                target_ulong avpn,
                                target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    CPUPPCState *env = &cpu->env;
    uint64_t token;
    target_ulong v, r;

    if (!valid_pte_index(env, ptex)) {
        return REMOVE_PARM;
    }

    token = ppc_hash64_start_access(cpu, ptex);
    v = ppc_hash64_load_hpte0(cpu, token, 0);
    r = ppc_hash64_load_hpte1(cpu, token, 0);
    ppc_hash64_stop_access(cpu, token);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        return REMOVE_NOT_FOUND;
    }
    *vp = v;
    *rp = r;
    ppc_hash64_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    return REMOVE_SUCCESS;
}

static target_ulong h_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    RemoveResult ret;

    ret = remove_hpte(cpu, pte_index, avpn, flags,
                      &args[0], &args[1]);

    switch (ret) {
    case REMOVE_SUCCESS:
        check_tlb_flush(env);
        return H_SUCCESS;

    case REMOVE_NOT_FOUND:
        return H_NOT_FOUND;

    case REMOVE_PARM:
        return H_PARAMETER;

    case REMOVE_HW:
        return H_HARDWARE;
    }

    g_assert_not_reached();
}

#define H_BULK_REMOVE_TYPE         0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST    0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE   0x8000000000000000ULL
#define   H_BULK_REMOVE_END        0xc000000000000000ULL
#define H_BULK_REMOVE_CODE         0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS    0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND  0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM       0x2000000000000000ULL
#define   H_BULK_REMOVE_HW         0x3000000000000000ULL
#define H_BULK_REMOVE_RC           0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS        0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE   0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND    0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN       0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX         0x00ffffffffffffffULL

#define H_BULK_REMOVE_MAX_BATCH    4
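
/*
 * H_BULK_REMOVE processes up to four "translation specifier" pairs per
 * call.  The high byte of the first doubleword of each pair encodes the
 * request type and flags (masks above); on return the same doubleword
 * carries the response code and, on success, the reference and change
 * bits of the removed entry.
 */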
static target_ulong h_bulk_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    int i;
    target_ulong rc = H_SUCCESS;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i*2];
        target_ulong tsl = args[i*2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            break;
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

        ret = remove_hpte(cpu, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        *tsh |= ret << 60;

        switch (ret) {
        case REMOVE_SUCCESS:
            *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
            break;

        case REMOVE_PARM:
            rc = H_PARAMETER;
            goto exit;

        case REMOVE_HW:
            rc = H_HARDWARE;
            goto exit;
        }
    }
 exit:
    check_tlb_flush(env);

    return rc;
}

static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    uint64_t token;
    target_ulong v, r;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    token = ppc_hash64_start_access(cpu, pte_index);
    v = ppc_hash64_load_hpte0(cpu, token, 0);
    r = ppc_hash64_load_hpte1(cpu, token, 0);
    ppc_hash64_stop_access(cpu, token);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        return H_NOT_FOUND;
    }

    r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
           HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
    r |= (flags << 55) & HPTE64_R_PP0;
    r |= (flags << 48) & HPTE64_R_KEY_HI;
    r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
    ppc_hash64_store_hpte(cpu, pte_index,
                          (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, pte_index, v, r);
    /* Don't need a memory barrier, due to qemu's global lock */
    ppc_hash64_store_hpte(cpu, pte_index, v | HPTE64_V_HPTE_DIRTY, r);
    return H_SUCCESS;
}

static target_ulong h_read(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    uint8_t *hpte;
    int i, ridx, n_entries = 1;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    if (flags & H_READ_4) {
        /* Clear the two low order bits */
        pte_index &= ~(3ULL);
        n_entries = 4;
    }

    hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64);

    for (i = 0, ridx = 0; i < n_entries; i++) {
        args[ridx++] = ldq_p(hpte);
        args[ridx++] = ldq_p(hpte + (HASH_PTE_SIZE_64 / 2));
        hpte += HASH_PTE_SIZE_64;
    }

    return H_SUCCESS;
}

static target_ulong h_set_sprg0(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    cpu_synchronize_state(CPU(cpu));
    cpu->env.spr[SPR_SPRG0] = args[0];

    return H_SUCCESS;
}
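
/*
 * H_SET_DABR/H_SET_XDABR program the Data Address Breakpoint Register.
 * On CPUs without a DABRX register we can only accept requests with the
 * "Breakpoint Translation" bit set; anything else is rejected with
 * H_RESERVED_DABR.
 */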
static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    if (!has_spr(cpu, SPR_DABR)) {
        return H_HARDWARE;              /* DABR register not available */
    }
    cpu_synchronize_state(CPU(cpu));

    if (has_spr(cpu, SPR_DABRX)) {
        cpu->env.spr[SPR_DABRX] = 0x3;  /* Use Problem and Privileged state */
    } else if (!(args[0] & 0x4)) {      /* Breakpoint Translation set? */
        return H_RESERVED_DABR;
    }

    cpu->env.spr[SPR_DABR] = args[0];
    return H_SUCCESS;
}

static target_ulong h_set_xdabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong dabrx = args[1];

    if (!has_spr(cpu, SPR_DABR) || !has_spr(cpu, SPR_DABRX)) {
        return H_HARDWARE;
    }

    if ((dabrx & ~0xfULL) != 0 || (dabrx & H_DABRX_HYPERVISOR) != 0
        || (dabrx & (H_DABRX_KERNEL | H_DABRX_USER)) == 0) {
        return H_PARAMETER;
    }

    cpu_synchronize_state(CPU(cpu));
    cpu->env.spr[SPR_DABRX] = dabrx;
    cpu->env.spr[SPR_DABR] = args[0];

    return H_SUCCESS;
}

static target_ulong h_page_init(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    hwaddr dst = args[1];
    hwaddr src = args[2];
    hwaddr len = TARGET_PAGE_SIZE;
    uint8_t *pdst, *psrc;
    target_long ret = H_SUCCESS;

    if (flags & ~(H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE
                  | H_COPY_PAGE | H_ZERO_PAGE)) {
        qemu_log_mask(LOG_UNIMP, "h_page_init: Bad flags (" TARGET_FMT_lx ")\n",
                      flags);
        return H_PARAMETER;
    }

    /* Map-in destination */
    if (!is_ram_address(spapr, dst) || (dst & ~TARGET_PAGE_MASK) != 0) {
        return H_PARAMETER;
    }
    pdst = cpu_physical_memory_map(dst, &len, 1);
    if (!pdst || len != TARGET_PAGE_SIZE) {
        return H_PARAMETER;
    }

    if (flags & H_COPY_PAGE) {
        /* Map-in source, copy to destination, and unmap source again */
        if (!is_ram_address(spapr, src) || (src & ~TARGET_PAGE_MASK) != 0) {
            ret = H_PARAMETER;
            goto unmap_out;
        }
        psrc = cpu_physical_memory_map(src, &len, 0);
        if (!psrc || len != TARGET_PAGE_SIZE) {
            ret = H_PARAMETER;
            goto unmap_out;
        }
        memcpy(pdst, psrc, len);
        cpu_physical_memory_unmap(psrc, len, 0, len);
    } else if (flags & H_ZERO_PAGE) {
        memset(pdst, 0, len);           /* Just clear the destination page */
    }

    if (kvm_enabled() && (flags & H_ICACHE_SYNCHRONIZE) != 0) {
        kvmppc_dcbst_range(cpu, pdst, len);
    }
    if (flags & (H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE)) {
        if (kvm_enabled()) {
            kvmppc_icbi_range(cpu, pdst, len);
        } else {
            tb_flush(CPU(cpu));
        }
    }

 unmap_out:
    cpu_physical_memory_unmap(pdst, TARGET_PAGE_SIZE, 1, len);
    return ret;
}

#define FLAGS_REGISTER_VPA         0x0000200000000000ULL
#define FLAGS_REGISTER_DTL         0x0000400000000000ULL
#define FLAGS_REGISTER_SLBSHADOW   0x0000600000000000ULL
#define FLAGS_DEREGISTER_VPA       0x0000a00000000000ULL
#define FLAGS_DEREGISTER_DTL       0x0000c00000000000ULL
#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL

#define VPA_MIN_SIZE           640
#define VPA_SIZE_OFFSET        0x4
#define VPA_SHARED_PROC_OFFSET 0x9
#define VPA_SHARED_PROC_VAL    0x2
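
/*
 * The Virtual Processor Area, SLB shadow buffer and dispatch trace log
 * are guest-supplied structures shared with the hypervisor.  We only
 * record their guest addresses and sizes here; registration order
 * matters (the VPA must be registered before the others, and can only
 * be deregistered after them).
 */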
static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint16_t size;
    uint8_t tmp;

    if (vpa == 0) {
        hcall_dprintf("Can't cope with registering a VPA at logical 0\n");
        return H_HARDWARE;
    }

    if (vpa % env->dcache_line_size) {
        return H_PARAMETER;
    }
    /* FIXME: bounds check the address */

    size = lduw_be_phys(cs->as, vpa + 0x4);

    if (size < VPA_MIN_SIZE) {
        return H_PARAMETER;
    }

    /* VPA is not allowed to cross a page boundary */
    if ((vpa / 4096) != ((vpa + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    env->vpa_addr = vpa;

    tmp = ldub_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET);
    tmp |= VPA_SHARED_PROC_VAL;
    stb_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);

    return H_SUCCESS;
}

static target_ulong deregister_vpa(CPUPPCState *env, target_ulong vpa)
{
    if (env->slb_shadow_addr) {
        return H_RESOURCE;
    }

    if (env->dtl_addr) {
        return H_RESOURCE;
    }

    env->vpa_addr = 0;
    return H_SUCCESS;
}

static target_ulong register_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with SLB shadow at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);
    if (size < 0x8) {
        return H_PARAMETER;
    }

    if ((addr / 4096) != ((addr + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->slb_shadow_addr = addr;
    env->slb_shadow_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    env->slb_shadow_addr = 0;
    env->slb_shadow_size = 0;
    return H_SUCCESS;
}

static target_ulong register_dtl(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with DTL at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);

    if (size < 48) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->dtl_addr = addr;
    env->dtl_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_dtl(CPUPPCState *env, target_ulong addr)
{
    env->dtl_addr = 0;
    env->dtl_size = 0;

    return H_SUCCESS;
}

static target_ulong h_register_vpa(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong procno = args[1];
    target_ulong vpa = args[2];
    target_ulong ret = H_PARAMETER;
    CPUPPCState *tenv;
    PowerPCCPU *tcpu;

    tcpu = ppc_get_vcpu_by_dt_id(procno);
    if (!tcpu) {
        return H_PARAMETER;
    }
    tenv = &tcpu->env;

    switch (flags) {
    case FLAGS_REGISTER_VPA:
        ret = register_vpa(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_VPA:
        ret = deregister_vpa(tenv, vpa);
        break;

    case FLAGS_REGISTER_SLBSHADOW:
        ret = register_slb_shadow(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_SLBSHADOW:
        ret = deregister_slb_shadow(tenv, vpa);
        break;

    case FLAGS_REGISTER_DTL:
        ret = register_dtl(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_DTL:
        ret = deregister_dtl(tenv, vpa);
        break;
    }

    return ret;
}
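
/*
 * H_CEDE: the guest gives up the processor until an interrupt arrives.
 * We enable external interrupts in the MSR and, if there is no pending
 * work, halt the vCPU.
 */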
static target_ulong h_cede(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    env->msr |= (1ULL << MSR_EE);
    hreg_compute_hflags(env);
    if (!cpu_has_work(cs)) {
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
        cs->exit_request = 1;
    }
    return H_SUCCESS;
}

static target_ulong h_rtas(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong rtas_r3 = args[0];
    uint32_t token = rtas_ld(rtas_r3, 0);
    uint32_t nargs = rtas_ld(rtas_r3, 1);
    uint32_t nret = rtas_ld(rtas_r3, 2);

    return spapr_rtas_call(cpu, spapr, token, nargs, rtas_r3 + 12,
                           nret, rtas_r3 + 12 + 4*nargs);
}

static target_ulong h_logical_load(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong size = args[0];
    target_ulong addr = args[1];

    switch (size) {
    case 1:
        args[0] = ldub_phys(cs->as, addr);
        return H_SUCCESS;
    case 2:
        args[0] = lduw_phys(cs->as, addr);
        return H_SUCCESS;
    case 4:
        args[0] = ldl_phys(cs->as, addr);
        return H_SUCCESS;
    case 8:
        args[0] = ldq_phys(cs->as, addr);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

static target_ulong h_logical_store(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong size = args[0];
    target_ulong addr = args[1];
    target_ulong val = args[2];

    switch (size) {
    case 1:
        stb_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 2:
        stw_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 4:
        stl_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 8:
        stq_phys(cs->as, addr, val);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong dst = args[0];   /* Destination address */
    target_ulong src = args[1];   /* Source address */
    target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
    target_ulong count = args[3]; /* Element count */
    target_ulong op = args[4];    /* 0 = copy, 1 = invert */
    uint64_t tmp;
    unsigned int mask = (1 << esize) - 1;
    int step = 1 << esize;

    if (count > 0x80000000) {
        return H_PARAMETER;
    }

    if ((dst & mask) || (src & mask) || (op > 1)) {
        return H_PARAMETER;
    }

    if (dst >= src && dst < (src + (count << esize))) {
        dst = dst + ((count - 1) << esize);
        src = src + ((count - 1) << esize);
        step = -step;
    }

    while (count--) {
        switch (esize) {
        case 0:
            tmp = ldub_phys(cs->as, src);
            break;
        case 1:
            tmp = lduw_phys(cs->as, src);
            break;
        case 2:
            tmp = ldl_phys(cs->as, src);
            break;
        case 3:
            tmp = ldq_phys(cs->as, src);
            break;
        default:
            return H_PARAMETER;
        }
        if (op == 1) {
            tmp = ~tmp;
        }
        switch (esize) {
        case 0:
            stb_phys(cs->as, dst, tmp);
            break;
        case 1:
            stw_phys(cs->as, dst, tmp);
            break;
        case 2:
            stl_phys(cs->as, dst, tmp);
            break;
        case 3:
            stq_phys(cs->as, dst, tmp);
            break;
        }
        dst = dst + step;
        src = src + step;
    }

    return H_SUCCESS;
}

static target_ulong h_logical_icbi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

static target_ulong h_logical_dcbf(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}
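
/*
 * H_SET_MODE resources.  The LE resource flips the interrupt
 * little-endian bit (LPCR_ILE) on every vCPU so that exceptions are
 * taken in the guest's chosen endianness; the address translation mode
 * resource sets the LPCR_AIL bits, which only exist on ISA 2.07
 * (POWER8) and later CPUs.
 */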
static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu,
                                           target_ulong mflags,
                                           target_ulong value1,
                                           target_ulong value2)
{
    CPUState *cs;

    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    switch (mflags) {
    case H_SET_MODE_ENDIAN_BIG:
        CPU_FOREACH(cs) {
            set_spr(cs, SPR_LPCR, 0, LPCR_ILE);
        }
        spapr_pci_switch_vga(true);
        return H_SUCCESS;

    case H_SET_MODE_ENDIAN_LITTLE:
        CPU_FOREACH(cs) {
            set_spr(cs, SPR_LPCR, LPCR_ILE, LPCR_ILE);
        }
        spapr_pci_switch_vga(false);
        return H_SUCCESS;
    }

    return H_UNSUPPORTED_FLAG;
}

static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu,
                                                        target_ulong mflags,
                                                        target_ulong value1,
                                                        target_ulong value2)
{
    CPUState *cs;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (!(pcc->insns_flags2 & PPC2_ISA207S)) {
        return H_P2;
    }
    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    if (mflags == AIL_RESERVED) {
        return H_UNSUPPORTED_FLAG;
    }

    CPU_FOREACH(cs) {
        set_spr(cs, SPR_LPCR, mflags << LPCR_AIL_SHIFT, LPCR_AIL);
    }

    return H_SUCCESS;
}

static target_ulong h_set_mode(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    target_ulong resource = args[1];
    target_ulong ret = H_P2;

    switch (resource) {
    case H_SET_MODE_RESOURCE_LE:
        ret = h_set_mode_resource_le(cpu, args[0], args[2], args[3]);
        break;
    case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
        ret = h_set_mode_resource_addr_trans_mode(cpu, args[0],
                                                  args[2], args[3]);
        break;
    }

    return ret;
}

/*
 * Return the offset to the requested option vector @vector in the
 * option vector table @table.
 */
static target_ulong cas_get_option_vector(int vector, target_ulong table)
{
    int i;
    char nr_vectors, nr_entries;

    if (!table) {
        return 0;
    }

    nr_vectors = (ldl_phys(&address_space_memory, table) >> 24) + 1;
    if (!vector || vector > nr_vectors) {
        return 0;
    }
    table++; /* skip nr option vectors */

    for (i = 0; i < vector - 1; i++) {
        nr_entries = ldl_phys(&address_space_memory, table) >> 24;
        table += nr_entries + 2;
    }
    return table;
}

typedef struct {
    PowerPCCPU *cpu;
    uint32_t cpu_version;
    Error *err;
} SetCompatState;

static void do_set_compat(void *arg)
{
    SetCompatState *s = arg;

    cpu_synchronize_state(CPU(s->cpu));
    ppc_set_compat(s->cpu, s->cpu_version, &s->err);
}

#define get_compat_level(cpuver) ( \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_05) ? 2050 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_06) ? 2060 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_06_PLUS) ? 2061 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_07) ? 2070 : 0)
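
/*
 * ibm,client-architecture-support negotiation.  The guest supplies a
 * list of (PVR mask, PVR value) pairs; cas_handle_compat_cpu() tracks
 * the best logical ("architected") PVR that both the guest and this
 * CPU class support, bounded by the user's max-compat setting.
 */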
static void cas_handle_compat_cpu(PowerPCCPUClass *pcc, uint32_t pvr,
                                  unsigned max_lvl, unsigned *compat_lvl,
                                  unsigned *cpu_version)
{
    unsigned lvl = get_compat_level(pvr);
    bool is205, is206, is207;

    if (!lvl) {
        return;
    }

    /* If it is a logical PVR, try to determine the highest level */
    is205 = (pcc->pcr_supported & PCR_COMPAT_2_05) &&
            (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_05));
    is206 = (pcc->pcr_supported & PCR_COMPAT_2_06) &&
            ((lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06)) ||
             (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06_PLUS)));
    is207 = (pcc->pcr_supported & PCR_COMPAT_2_07) &&
            (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_07));

    if (is205 || is206 || is207) {
        if (!max_lvl) {
            /* User did not set the level, choose the highest */
            if (*compat_lvl <= lvl) {
                *compat_lvl = lvl;
                *cpu_version = pvr;
            }
        } else if (max_lvl >= lvl) {
            /* User chose the level, don't set higher than this */
            *compat_lvl = lvl;
            *cpu_version = pvr;
        }
    }
}

#define OV5_DRCONF_MEMORY 0x20

static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
                                                  sPAPRMachineState *spapr,
                                                  target_ulong opcode,
                                                  target_ulong *args)
{
    target_ulong list = ppc64_phys_to_real(args[0]);
    target_ulong ov_table, ov5;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu_);
    CPUState *cs;
    bool cpu_match = false, cpu_update = true, memory_update = false;
    unsigned old_cpu_version = cpu_->cpu_version;
    unsigned compat_lvl = 0, cpu_version = 0;
    unsigned max_lvl = get_compat_level(cpu_->max_compat);
    int counter;
    char ov5_byte2;

    /* Parse PVR list */
    for (counter = 0; counter < 512; ++counter) {
        uint32_t pvr, pvr_mask;

        pvr_mask = ldl_be_phys(&address_space_memory, list);
        list += 4;
        pvr = ldl_be_phys(&address_space_memory, list);
        list += 4;

        trace_spapr_cas_pvr_try(pvr);
        if (!max_lvl &&
            ((cpu_->env.spr[SPR_PVR] & pvr_mask) == (pvr & pvr_mask))) {
            cpu_match = true;
            cpu_version = 0;
        } else if (pvr == cpu_->cpu_version) {
            cpu_match = true;
            cpu_version = cpu_->cpu_version;
        } else if (!cpu_match) {
            cas_handle_compat_cpu(pcc, pvr, max_lvl, &compat_lvl, &cpu_version);
        }
        /* Terminator record */
        if (~pvr_mask & pvr) {
            break;
        }
    }

    /* Parsing finished */
    trace_spapr_cas_pvr(cpu_->cpu_version, cpu_match,
                        cpu_version, pcc->pcr_mask);

    /* Update CPUs */
    if (old_cpu_version != cpu_version) {
        CPU_FOREACH(cs) {
            SetCompatState s = {
                .cpu = POWERPC_CPU(cs),
                .cpu_version = cpu_version,
                .err = NULL,
            };

            run_on_cpu(cs, do_set_compat, &s);

            if (s.err) {
                error_report_err(s.err);
                return H_HARDWARE;
            }
        }
    }

    if (!cpu_version) {
        cpu_update = false;
    }

    /* For the future use: here @ov_table points to the first option vector */
    ov_table = list;

    ov5 = cas_get_option_vector(5, ov_table);
    if (!ov5) {
        return H_SUCCESS;
    }

    /* @list now points to OV 5 */
    ov5_byte2 = ldub_phys(&address_space_memory, ov5 + 2);
    if (ov5_byte2 & OV5_DRCONF_MEMORY) {
        memory_update = true;
    }

    if (spapr_h_cas_compose_response(spapr, args[1], args[2],
                                     cpu_update, memory_update)) {
        qemu_system_reset_request();
    }

    return H_SUCCESS;
}
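
/*
 * Dispatch tables.  Standard PAPR hypercall numbers are multiples of
 * four, so the first table is indexed by opcode / 4; qemu/KVM private
 * hcalls live in their own contiguous range above KVMPPC_HCALL_BASE.
 */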
static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1];

void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
{
    spapr_hcall_fn *slot;

    if (opcode <= MAX_HCALL_OPCODE) {
        assert((opcode & 0x3) == 0);

        slot = &papr_hypercall_table[opcode / 4];
    } else {
        assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX));

        slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
    }

    assert(!(*slot));
    *slot = fn;
}

target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
                             target_ulong *args)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());

    if ((opcode <= MAX_HCALL_OPCODE)
        && ((opcode & 0x3) == 0)) {
        spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    } else if ((opcode >= KVMPPC_HCALL_BASE) &&
               (opcode <= KVMPPC_HCALL_MAX)) {
        spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    }

    qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x" TARGET_FMT_lx "\n",
                  opcode);
    return H_FUNCTION;
}

static void hypercall_register_types(void)
{
    /* hcall-pft */
    spapr_register_hypercall(H_ENTER, h_enter);
    spapr_register_hypercall(H_REMOVE, h_remove);
    spapr_register_hypercall(H_PROTECT, h_protect);
    spapr_register_hypercall(H_READ, h_read);

    /* hcall-bulk */
    spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);

    /* hcall-splpar */
    spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa);
    spapr_register_hypercall(H_CEDE, h_cede);

    /* processor register resource access h-calls */
    spapr_register_hypercall(H_SET_SPRG0, h_set_sprg0);
    spapr_register_hypercall(H_SET_DABR, h_set_dabr);
    spapr_register_hypercall(H_SET_XDABR, h_set_xdabr);
    spapr_register_hypercall(H_PAGE_INIT, h_page_init);
    spapr_register_hypercall(H_SET_MODE, h_set_mode);

    /* "debugger" hcalls (also used by SLOF). Note: We do -not- differentiate
     * here between the "CI" and the "CACHE" variants, they will use whatever
     * mapping attributes qemu is using. When using KVM, the kernel will
     * enforce the attributes more strongly.
     */
    spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
    spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
    spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop);

    /* qemu/KVM-PPC specific hcalls */
    spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);

    /* ibm,client-architecture-support support */
    spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support);
}

type_init(hypercall_register_types)