#include "sysemu/sysemu.h"
#include "cpu.h"
#include "helper_regs.h"
#include "hw/ppc/spapr.h"
#include "mmu-hash64.h"
#include "cpu-models.h"
#include "trace.h"
#include "kvm_ppc.h"

struct SPRSyncState {
    CPUState *cs;
    int spr;
    target_ulong value;
    target_ulong mask;
};

static void do_spr_sync(void *arg)
{
    struct SPRSyncState *s = arg;
    PowerPCCPU *cpu = POWERPC_CPU(s->cs);
    CPUPPCState *env = &cpu->env;

    cpu_synchronize_state(s->cs);
    env->spr[s->spr] &= ~s->mask;
    env->spr[s->spr] |= s->value;
}

static void set_spr(CPUState *cs, int spr, target_ulong value,
                    target_ulong mask)
{
    struct SPRSyncState s = {
        .cs = cs,
        .spr = spr,
        .value = value,
        .mask = mask
    };
    run_on_cpu(cs, do_spr_sync, &s);
}

static target_ulong compute_tlbie_rb(target_ulong v, target_ulong r,
                                     target_ulong pte_index)
{
    target_ulong rb, va_low;

    rb = (v & ~0x7fULL) << 16; /* AVA field */
    va_low = pte_index >> 3;
    if (v & HPTE64_V_SECONDARY) {
        va_low = ~va_low;
    }
    /* xor vsid from AVA */
    if (!(v & HPTE64_V_1TB_SEG)) {
        va_low ^= v >> 12;
    } else {
        va_low ^= v >> 24;
    }
    va_low &= 0x7ff;
    if (v & HPTE64_V_LARGE) {
        rb |= 1;                         /* L field */
#if 0 /* Disable that P7 specific bit for now */
        if (r & 0xff000) {
            /* non-16MB large page, must be 64k */
            /* (masks depend on page size) */
            rb |= 0x1000;                /* page encoding in LP field */
            rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
            rb |= (va_low & 0xfe);       /* AVAL field */
        }
#endif
    } else {
        /* 4kB page */
        rb |= (va_low & 0x7ff) << 12;    /* remaining 11b of AVA */
    }
    rb |= (v >> 54) & 0x300;             /* B field */
    return rb;
}

static inline bool valid_pte_index(CPUPPCState *env, target_ulong pte_index)
{
    /*
     * hash value/pteg group index is normalized by htab_mask
     */
    if (((pte_index & ~7ULL) / HPTES_PER_GROUP) & ~env->htab_mask) {
        return false;
    }
    return true;
}
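/*
 * Worked example (illustrative, assuming htab_mask is the PTEG-index
 * mask, as the check above implies): a 256 KiB HTAB holds
 * 256 KiB / (HPTES_PER_GROUP * HASH_PTE_SIZE_64) = 2048 PTEGs, so
 * htab_mask is 0x7ff.  pte_index values 0 through 0x3fff map to PTEG
 * indexes 0 through 0x7ff and pass; pte_index 0x4000 maps to PTEG
 * index 0x800, which has bits outside htab_mask and is rejected.
 */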
static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
{
    MachineState *machine = MACHINE(spapr);
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    target_ulong page_shift = 12;
    target_ulong raddr;
    target_ulong index;
    uint64_t token;

    /* only handle 4k and 16M pages for now */
    if (pteh & HPTE64_V_LARGE) {
#if 0 /* We don't support 64k pages yet */
        if ((ptel & 0xf000) == 0x1000) {
            /* 64k page */
        } else
#endif
        if ((ptel & 0xff000) == 0) {
            /* 16M page */
            page_shift = 24;
            /* lowest AVA bit must be 0 for 16M pages */
            if (pteh & 0x80) {
                return H_PARAMETER;
            }
        } else {
            return H_PARAMETER;
        }
    }

    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << page_shift) - 1);

    if (raddr < machine->ram_size) {
        /* Regular RAM - should have WIMG=0010 */
        if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
            return H_PARAMETER;
        }
    } else {
        /* Looks like an IO address */
        /* FIXME: What WIMG combinations could be sensible for IO?
         * For now we allow WIMG=010x, but are there others? */
        /* FIXME: Should we check against registered IO addresses? */
        if ((ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M)) != HPTE64_R_I) {
            return H_PARAMETER;
        }
    }

    pteh &= ~0x60ULL;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    index = 0;
    if (likely((flags & H_EXACT) == 0)) {
        pte_index &= ~7ULL;
        token = ppc_hash64_start_access(cpu, pte_index);
        for (; index < 8; index++) {
            if ((ppc_hash64_load_hpte0(env, token, index) & HPTE64_V_VALID) == 0) {
                break;
            }
        }
        ppc_hash64_stop_access(token);
        if (index == 8) {
            return H_PTEG_FULL;
        }
    } else {
        token = ppc_hash64_start_access(cpu, pte_index);
        if (ppc_hash64_load_hpte0(env, token, 0) & HPTE64_V_VALID) {
            ppc_hash64_stop_access(token);
            return H_PTEG_FULL;
        }
        ppc_hash64_stop_access(token);
    }

    ppc_hash64_store_hpte(env, pte_index + index,
                          pteh | HPTE64_V_HPTE_DIRTY, ptel);

    args[0] = pte_index + index;
    return H_SUCCESS;
}

typedef enum {
    REMOVE_SUCCESS = 0,
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
} RemoveResult;

static RemoveResult remove_hpte(CPUPPCState *env, target_ulong ptex,
                                target_ulong avpn,
                                target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    uint64_t token;
    target_ulong v, r, rb;

    if (!valid_pte_index(env, ptex)) {
        return REMOVE_PARM;
    }

    token = ppc_hash64_start_access(ppc_env_get_cpu(env), ptex);
    v = ppc_hash64_load_hpte0(env, token, 0);
    r = ppc_hash64_load_hpte1(env, token, 0);
    ppc_hash64_stop_access(token);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        return REMOVE_NOT_FOUND;
    }
    *vp = v;
    *rp = r;
    ppc_hash64_store_hpte(env, ptex, HPTE64_V_HPTE_DIRTY, 0);
    rb = compute_tlbie_rb(v, r, ptex);
    ppc_tlb_invalidate_one(env, rb);
    return REMOVE_SUCCESS;
}

static target_ulong h_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    RemoveResult ret;

    ret = remove_hpte(env, pte_index, avpn, flags,
                      &args[0], &args[1]);

    switch (ret) {
    case REMOVE_SUCCESS:
        return H_SUCCESS;

    case REMOVE_NOT_FOUND:
        return H_NOT_FOUND;

    case REMOVE_PARM:
        return H_PARAMETER;

    case REMOVE_HW:
        return H_HARDWARE;
    }

    g_assert_not_reached();
}

#define H_BULK_REMOVE_TYPE         0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST    0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE   0x8000000000000000ULL
#define   H_BULK_REMOVE_END        0xc000000000000000ULL
#define H_BULK_REMOVE_CODE         0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS    0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND  0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM       0x2000000000000000ULL
#define   H_BULK_REMOVE_HW         0x3000000000000000ULL
#define H_BULK_REMOVE_RC           0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS        0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE   0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND    0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN       0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX         0x00ffffffffffffffULL

#define H_BULK_REMOVE_MAX_BATCH    4
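/*
 * Illustrative decoding of one request doubleword (the "tsh" word
 * below), based purely on the masks above: bits 63:62 hold the TYPE,
 * bits 61:60 the completion CODE, bits 57:56 the FLAGS and bits 55:0
 * the PTEX.  For example tsh = 0x4000000000000123ULL is a REQUEST
 * with ABSOLUTE flags for pte_index 0x123; on success the handler
 * rewrites it in place to RESPONSE | SUCCESS with the PTEX preserved.
 */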
static target_ulong h_bulk_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    int i;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i*2];
        target_ulong tsl = args[i*2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            break;
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

        ret = remove_hpte(env, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        *tsh |= ret << 60;

        switch (ret) {
        case REMOVE_SUCCESS:
            *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
            break;

        case REMOVE_PARM:
            return H_PARAMETER;

        case REMOVE_HW:
            return H_HARDWARE;
        }
    }

    return H_SUCCESS;
}

static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    uint64_t token;
    target_ulong v, r, rb;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    token = ppc_hash64_start_access(cpu, pte_index);
    v = ppc_hash64_load_hpte0(env, token, 0);
    r = ppc_hash64_load_hpte1(env, token, 0);
    ppc_hash64_stop_access(token);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        return H_NOT_FOUND;
    }

    r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
           HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
    r |= (flags << 55) & HPTE64_R_PP0;
    r |= (flags << 48) & HPTE64_R_KEY_HI;
    r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
    rb = compute_tlbie_rb(v, r, pte_index);
    ppc_hash64_store_hpte(env, pte_index,
                          (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
    ppc_tlb_invalidate_one(env, rb);
    /* Don't need a memory barrier, due to qemu's global lock */
    ppc_hash64_store_hpte(env, pte_index, v | HPTE64_V_HPTE_DIRTY, r);
    return H_SUCCESS;
}

static target_ulong h_read(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    uint8_t *hpte;
    int i, ridx, n_entries = 1;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    if (flags & H_READ_4) {
        /* Clear the two low order bits */
        pte_index &= ~(3ULL);
        n_entries = 4;
    }

    hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64);

    for (i = 0, ridx = 0; i < n_entries; i++) {
        args[ridx++] = ldq_p(hpte);
        args[ridx++] = ldq_p(hpte + (HASH_PTE_SIZE_64/2));
        hpte += HASH_PTE_SIZE_64;
    }

    return H_SUCCESS;
}
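/*
 * Example (illustrative): H_READ with the H_READ_4 flag and
 * pte_index 0x12 rounds down to index 0x10 and returns that
 * four-entry block: args[0]/args[1] receive the V and R doublewords
 * of entry 0x10, args[2]/args[3] those of entry 0x11, and so on.
 */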
static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    /* FIXME: actually implement this */
    return H_HARDWARE;
}

#define FLAGS_REGISTER_VPA         0x0000200000000000ULL
#define FLAGS_REGISTER_DTL         0x0000400000000000ULL
#define FLAGS_REGISTER_SLBSHADOW   0x0000600000000000ULL
#define FLAGS_DEREGISTER_VPA       0x0000a00000000000ULL
#define FLAGS_DEREGISTER_DTL       0x0000c00000000000ULL
#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL

#define VPA_MIN_SIZE           640
#define VPA_SIZE_OFFSET        0x4
#define VPA_SHARED_PROC_OFFSET 0x9
#define VPA_SHARED_PROC_VAL    0x2

static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint16_t size;
    uint8_t tmp;

    if (vpa == 0) {
        hcall_dprintf("Can't cope with registering a VPA at logical 0\n");
        return H_HARDWARE;
    }

    if (vpa % env->dcache_line_size) {
        return H_PARAMETER;
    }
    /* FIXME: bounds check the address */

    size = lduw_be_phys(cs->as, vpa + 0x4);

    if (size < VPA_MIN_SIZE) {
        return H_PARAMETER;
    }

    /* VPA is not allowed to cross a page boundary */
    if ((vpa / 4096) != ((vpa + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    env->vpa_addr = vpa;

    tmp = ldub_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET);
    tmp |= VPA_SHARED_PROC_VAL;
    stb_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);

    return H_SUCCESS;
}

static target_ulong deregister_vpa(CPUPPCState *env, target_ulong vpa)
{
    if (env->slb_shadow_addr) {
        return H_RESOURCE;
    }

    if (env->dtl_addr) {
        return H_RESOURCE;
    }

    env->vpa_addr = 0;
    return H_SUCCESS;
}

static target_ulong register_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with SLB shadow at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);
    if (size < 0x8) {
        return H_PARAMETER;
    }

    if ((addr / 4096) != ((addr + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->slb_shadow_addr = addr;
    env->slb_shadow_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    env->slb_shadow_addr = 0;
    env->slb_shadow_size = 0;
    return H_SUCCESS;
}

static target_ulong register_dtl(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with DTL at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);

    if (size < 48) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->dtl_addr = addr;
    env->dtl_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_dtl(CPUPPCState *env, target_ulong addr)
{
    env->dtl_addr = 0;
    env->dtl_size = 0;

    return H_SUCCESS;
}

static target_ulong h_register_vpa(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong procno = args[1];
    target_ulong vpa = args[2];
    target_ulong ret = H_PARAMETER;
    CPUPPCState *tenv;
    PowerPCCPU *tcpu;

    tcpu = ppc_get_vcpu_by_dt_id(procno);
    if (!tcpu) {
        return H_PARAMETER;
    }
    tenv = &tcpu->env;

    switch (flags) {
    case FLAGS_REGISTER_VPA:
        ret = register_vpa(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_VPA:
        ret = deregister_vpa(tenv, vpa);
        break;

    case FLAGS_REGISTER_SLBSHADOW:
        ret = register_slb_shadow(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_SLBSHADOW:
        ret = deregister_slb_shadow(tenv, vpa);
        break;

    case FLAGS_REGISTER_DTL:
        ret = register_dtl(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_DTL:
        ret = deregister_dtl(tenv, vpa);
        break;
    }

    return ret;
}
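/*
 * Example call (illustrative): to register a VPA the guest invokes
 * H_REGISTER_VPA with args[0] = FLAGS_REGISTER_VPA, args[1] = the
 * target vcpu's device-tree id and args[2] = the buffer's logical
 * address.  register_vpa() above requires that address to be
 * cache-line aligned, the embedded size (halfword at offset
 * VPA_SIZE_OFFSET) to be at least VPA_MIN_SIZE, and the buffer not
 * to cross a 4 KiB page.  The SLB shadow and DTL can only exist
 * while a VPA is registered, which is why deregister_vpa() fails
 * with H_RESOURCE while either is still set.
 */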
static target_ulong h_cede(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    env->msr |= (1ULL << MSR_EE);
    hreg_compute_hflags(env);
    if (!cpu_has_work(cs)) {
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
        cs->exit_request = 1;
    }
    return H_SUCCESS;
}

static target_ulong h_rtas(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong rtas_r3 = args[0];
    uint32_t token = rtas_ld(rtas_r3, 0);
    uint32_t nargs = rtas_ld(rtas_r3, 1);
    uint32_t nret = rtas_ld(rtas_r3, 2);

    return spapr_rtas_call(cpu, spapr, token, nargs, rtas_r3 + 12,
                           nret, rtas_r3 + 12 + 4*nargs);
}
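/*
 * Layout of the RTAS argument buffer implied by the loads above, all
 * 32-bit big-endian words: token at offset 0, nargs at 4, nret at 8,
 * the nargs call parameters from offset 12, and the nret return
 * slots immediately after them at offset 12 + 4 * nargs.
 */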
static target_ulong h_logical_load(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong size = args[0];
    target_ulong addr = args[1];

    switch (size) {
    case 1:
        args[0] = ldub_phys(cs->as, addr);
        return H_SUCCESS;
    case 2:
        args[0] = lduw_phys(cs->as, addr);
        return H_SUCCESS;
    case 4:
        args[0] = ldl_phys(cs->as, addr);
        return H_SUCCESS;
    case 8:
        args[0] = ldq_phys(cs->as, addr);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

static target_ulong h_logical_store(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong size = args[0];
    target_ulong addr = args[1];
    target_ulong val = args[2];

    switch (size) {
    case 1:
        stb_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 2:
        stw_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 4:
        stl_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 8:
        stq_phys(cs->as, addr, val);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong dst = args[0];   /* Destination address */
    target_ulong src = args[1];   /* Source address */
    target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
    target_ulong count = args[3]; /* Element count */
    target_ulong op = args[4];    /* 0 = copy, 1 = invert */
    uint64_t tmp;
    unsigned int mask = (1 << esize) - 1;
    int step = 1 << esize;

    if (count > 0x80000000) {
        return H_PARAMETER;
    }

    if ((dst & mask) || (src & mask) || (op > 1)) {
        return H_PARAMETER;
    }

    if (dst >= src && dst < (src + (count << esize))) {
        dst = dst + ((count - 1) << esize);
        src = src + ((count - 1) << esize);
        step = -step;
    }

    while (count--) {
        switch (esize) {
        case 0:
            tmp = ldub_phys(cs->as, src);
            break;
        case 1:
            tmp = lduw_phys(cs->as, src);
            break;
        case 2:
            tmp = ldl_phys(cs->as, src);
            break;
        case 3:
            tmp = ldq_phys(cs->as, src);
            break;
        default:
            return H_PARAMETER;
        }
        if (op == 1) {
            tmp = ~tmp;
        }
        switch (esize) {
        case 0:
            stb_phys(cs->as, dst, tmp);
            break;
        case 1:
            stw_phys(cs->as, dst, tmp);
            break;
        case 2:
            stl_phys(cs->as, dst, tmp);
            break;
        case 3:
            stq_phys(cs->as, dst, tmp);
            break;
        }
        dst = dst + step;
        src = src + step;
    }

    return H_SUCCESS;
}
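/*
 * Example (illustrative): KVMPPC_H_LOGICAL_MEMOP with args[] =
 * { dst, src, 2, 16, 0 } copies sixteen 4-byte elements from src to
 * dst (both must be 4-byte aligned); op = 1 would store the bitwise
 * complement of each element instead.  When the regions overlap with
 * dst above src, the loop above walks backwards, memmove-style, so
 * no element is overwritten before it has been read.
 */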
static target_ulong h_logical_icbi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

static target_ulong h_logical_dcbf(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu,
                                           target_ulong mflags,
                                           target_ulong value1,
                                           target_ulong value2)
{
    CPUState *cs;

    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    switch (mflags) {
    case H_SET_MODE_ENDIAN_BIG:
        CPU_FOREACH(cs) {
            set_spr(cs, SPR_LPCR, 0, LPCR_ILE);
        }
        spapr_pci_switch_vga(true);
        return H_SUCCESS;

    case H_SET_MODE_ENDIAN_LITTLE:
        CPU_FOREACH(cs) {
            set_spr(cs, SPR_LPCR, LPCR_ILE, LPCR_ILE);
        }
        spapr_pci_switch_vga(false);
        return H_SUCCESS;
    }

    return H_UNSUPPORTED_FLAG;
}

static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu,
                                                        target_ulong mflags,
                                                        target_ulong value1,
                                                        target_ulong value2)
{
    CPUState *cs;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    target_ulong prefix;

    if (!(pcc->insns_flags2 & PPC2_ISA207S)) {
        return H_P2;
    }
    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    switch (mflags) {
    case H_SET_MODE_ADDR_TRANS_NONE:
        prefix = 0;
        break;
    case H_SET_MODE_ADDR_TRANS_0001_8000:
        prefix = 0x18000;
        break;
    case H_SET_MODE_ADDR_TRANS_C000_0000_0000_4000:
        prefix = 0xC000000000004000ULL;
        break;
    default:
        return H_UNSUPPORTED_FLAG;
    }

    CPU_FOREACH(cs) {
        /* Update the vCPU being iterated, not just the calling one */
        CPUPPCState *env = &POWERPC_CPU(cs)->env;

        set_spr(cs, SPR_LPCR, mflags << LPCR_AIL_SHIFT, LPCR_AIL);
        env->excp_prefix = prefix;
    }

    return H_SUCCESS;
}

static target_ulong h_set_mode(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    target_ulong resource = args[1];
    target_ulong ret = H_P2;

    switch (resource) {
    case H_SET_MODE_RESOURCE_LE:
        ret = h_set_mode_resource_le(cpu, args[0], args[2], args[3]);
        break;
    case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
        ret = h_set_mode_resource_addr_trans_mode(cpu, args[0],
                                                  args[2], args[3]);
        break;
    }

    return ret;
}

typedef struct {
    PowerPCCPU *cpu;
    uint32_t cpu_version;
    int ret;
} SetCompatState;

static void do_set_compat(void *arg)
{
    SetCompatState *s = arg;

    cpu_synchronize_state(CPU(s->cpu));
    s->ret = ppc_set_compat(s->cpu, s->cpu_version);
}

#define get_compat_level(cpuver) ( \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_05) ? 2050 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_06) ? 2060 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_06_PLUS) ? 2061 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_07) ? 2070 : 0)

static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
                                                  sPAPRMachineState *spapr,
                                                  target_ulong opcode,
                                                  target_ulong *args)
{
    target_ulong list = args[0];
    PowerPCCPUClass *pcc_ = POWERPC_CPU_GET_CLASS(cpu_);
    CPUState *cs;
    bool cpu_match = false;
    unsigned old_cpu_version = cpu_->cpu_version;
    unsigned compat_lvl = 0, cpu_version = 0;
    unsigned max_lvl = get_compat_level(cpu_->max_compat);
    int counter;

    /* Parse PVR list */
    for (counter = 0; counter < 512; ++counter) {
        uint32_t pvr, pvr_mask;

        pvr_mask = rtas_ld(list, 0);
        list += 4;
        pvr = rtas_ld(list, 0);
        list += 4;

        trace_spapr_cas_pvr_try(pvr);
        if (!max_lvl &&
            ((cpu_->env.spr[SPR_PVR] & pvr_mask) == (pvr & pvr_mask))) {
            cpu_match = true;
            cpu_version = 0;
        } else if (pvr == cpu_->cpu_version) {
            cpu_match = true;
            cpu_version = cpu_->cpu_version;
        } else if (!cpu_match) {
            /* If it is a logical PVR, try to determine the highest level */
            unsigned lvl = get_compat_level(pvr);
            if (lvl) {
                bool is205 = (pcc_->pcr_mask & PCR_COMPAT_2_05) &&
                             (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_05));
                bool is206 = (pcc_->pcr_mask & PCR_COMPAT_2_06) &&
                             ((lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06)) ||
                              (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06_PLUS)));

                if (is205 || is206) {
                    if (!max_lvl) {
                        /* User did not set the level, choose the highest */
                        if (compat_lvl <= lvl) {
                            compat_lvl = lvl;
                            cpu_version = pvr;
                        }
                    } else if (max_lvl >= lvl) {
                        /* User chose the level, don't set higher than this */
                        compat_lvl = lvl;
                        cpu_version = pvr;
                    }
                }
            }
        }
        /* Terminator record */
        if (~pvr_mask & pvr) {
            break;
        }
    }

    /* For the future use: here @list points to the first capability */

    /* Parsing finished */
    trace_spapr_cas_pvr(cpu_->cpu_version, cpu_match,
                        cpu_version, pcc_->pcr_mask);

    /* Update CPUs */
    if (old_cpu_version != cpu_version) {
        CPU_FOREACH(cs) {
            SetCompatState s = {
                .cpu = POWERPC_CPU(cs),
                .cpu_version = cpu_version,
                .ret = 0
            };

            run_on_cpu(cs, do_set_compat, &s);

            if (s.ret < 0) {
                fprintf(stderr, "Unable to set compatibility mode\n");
                return H_HARDWARE;
            }
        }
    }

    if (!cpu_version) {
        return H_SUCCESS;
    }

    if (!list) {
        return H_SUCCESS;
    }

    if (spapr_h_cas_compose_response(spapr, args[1], args[2])) {
        qemu_system_reset_request();
    }

    return H_SUCCESS;
}

static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1];

void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
{
    spapr_hcall_fn *slot;

    if (opcode <= MAX_HCALL_OPCODE) {
        assert((opcode & 0x3) == 0);

        slot = &papr_hypercall_table[opcode / 4];
    } else {
        assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX));

        slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
    }

    assert(!(*slot));
    *slot = fn;
}
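/*
 * Dispatch sketch (illustrative): architected PAPR opcodes are
 * multiples of 4, so papr_hypercall_table is indexed by opcode / 4,
 * while the qemu/KVM private hcalls get their own table indexed
 * relative to KVMPPC_HCALL_BASE.  spapr_hypercall() below selects
 * the table by opcode range and returns H_FUNCTION for anything
 * that was never registered.
 */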
target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
                             target_ulong *args)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());

    if ((opcode <= MAX_HCALL_OPCODE)
        && ((opcode & 0x3) == 0)) {
        spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    } else if ((opcode >= KVMPPC_HCALL_BASE) &&
               (opcode <= KVMPPC_HCALL_MAX)) {
        spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    }

    hcall_dprintf("Unimplemented hcall 0x" TARGET_FMT_lx "\n", opcode);
    return H_FUNCTION;
}

static void hypercall_register_types(void)
{
    /* hcall-pft */
    spapr_register_hypercall(H_ENTER, h_enter);
    spapr_register_hypercall(H_REMOVE, h_remove);
    spapr_register_hypercall(H_PROTECT, h_protect);
    spapr_register_hypercall(H_READ, h_read);

    /* hcall-bulk */
    spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);

    /* hcall-dabr */
    spapr_register_hypercall(H_SET_DABR, h_set_dabr);

    /* hcall-splpar */
    spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa);
    spapr_register_hypercall(H_CEDE, h_cede);

    /* "debugger" hcalls (also used by SLOF). Note: We do -not- differentiate
     * here between the "CI" and the "CACHE" variants, they will use whatever
     * mapping attributes qemu is using. When using KVM, the kernel will
     * enforce the attributes more strongly
     */
    spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
    spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
    spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop);

    /* qemu/KVM-PPC specific hcalls */
    spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);

    spapr_register_hypercall(H_SET_MODE, h_set_mode);

    /* ibm,client-architecture-support support */
    spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support);
}

type_init(hypercall_register_types)