1 #include "qemu/osdep.h" 2 #include "qemu/cutils.h" 3 #include "exec/exec-all.h" 4 #include "helper_regs.h" 5 #include "hw/ppc/ppc.h" 6 #include "hw/ppc/spapr.h" 7 #include "hw/ppc/spapr_cpu_core.h" 8 #include "hw/ppc/spapr_nested.h" 9 #include "mmu-book3s-v3.h" 10 #include "cpu-models.h" 11 #include "qemu/log.h" 12 13 void spapr_nested_reset(SpaprMachineState *spapr) 14 { 15 if (spapr_get_cap(spapr, SPAPR_CAP_NESTED_KVM_HV)) { 16 spapr_unregister_nested_hv(); 17 spapr_register_nested_hv(); 18 } else if (spapr_get_cap(spapr, SPAPR_CAP_NESTED_PAPR)) { 19 spapr->nested.capabilities_set = false; 20 spapr_unregister_nested_papr(); 21 spapr_register_nested_papr(); 22 spapr_nested_gsb_init(); 23 } else { 24 spapr->nested.api = 0; 25 } 26 } 27 28 uint8_t spapr_nested_api(SpaprMachineState *spapr) 29 { 30 return spapr->nested.api; 31 } 32 33 #ifdef CONFIG_TCG 34 35 bool spapr_get_pate_nested_hv(SpaprMachineState *spapr, PowerPCCPU *cpu, 36 target_ulong lpid, ppc_v3_pate_t *entry) 37 { 38 uint64_t patb, pats; 39 40 assert(lpid != 0); 41 42 patb = spapr->nested.ptcr & PTCR_PATB; 43 pats = spapr->nested.ptcr & PTCR_PATS; 44 45 /* Check if partition table is properly aligned */ 46 if (patb & MAKE_64BIT_MASK(0, pats + 12)) { 47 return false; 48 } 49 50 /* Calculate number of entries */ 51 pats = 1ull << (pats + 12 - 4); 52 if (pats <= lpid) { 53 return false; 54 } 55 56 /* Grab entry */ 57 patb += 16 * lpid; 58 entry->dw0 = ldq_phys(CPU(cpu)->as, patb); 59 entry->dw1 = ldq_phys(CPU(cpu)->as, patb + 8); 60 return true; 61 } 62 63 static 64 SpaprMachineStateNestedGuest *spapr_get_nested_guest(SpaprMachineState *spapr, 65 target_ulong guestid) 66 { 67 SpaprMachineStateNestedGuest *guest; 68 69 guest = g_hash_table_lookup(spapr->nested.guests, GINT_TO_POINTER(guestid)); 70 return guest; 71 } 72 73 bool spapr_get_pate_nested_papr(SpaprMachineState *spapr, PowerPCCPU *cpu, 74 target_ulong lpid, ppc_v3_pate_t *entry) 75 { 76 SpaprMachineStateNestedGuest *guest; 77 assert(lpid != 0); 78 guest = spapr_get_nested_guest(spapr, lpid); 79 if (!guest) { 80 return false; 81 } 82 83 entry->dw0 = guest->parttbl[0]; 84 entry->dw1 = guest->parttbl[1]; 85 return true; 86 } 87 88 #define PRTS_MASK 0x1f 89 90 static target_ulong h_set_ptbl(PowerPCCPU *cpu, 91 SpaprMachineState *spapr, 92 target_ulong opcode, 93 target_ulong *args) 94 { 95 target_ulong ptcr = args[0]; 96 97 if (!spapr_get_cap(spapr, SPAPR_CAP_NESTED_KVM_HV)) { 98 return H_FUNCTION; 99 } 100 101 if ((ptcr & PRTS_MASK) + 12 - 4 > 12) { 102 return H_PARAMETER; 103 } 104 105 spapr->nested.ptcr = ptcr; /* Save new partition table */ 106 107 return H_SUCCESS; 108 } 109 110 static target_ulong h_tlb_invalidate(PowerPCCPU *cpu, 111 SpaprMachineState *spapr, 112 target_ulong opcode, 113 target_ulong *args) 114 { 115 /* 116 * The spapr virtual hypervisor nested HV implementation retains no L2 117 * translation state except for TLB. And the TLB is always invalidated 118 * across L1<->L2 transitions, so nothing is required here. 119 */ 120 121 return H_SUCCESS; 122 } 123 124 static target_ulong h_copy_tofrom_guest(PowerPCCPU *cpu, 125 SpaprMachineState *spapr, 126 target_ulong opcode, 127 target_ulong *args) 128 { 129 /* 130 * This HCALL is not required, L1 KVM will take a slow path and walk the 131 * page tables manually to do the data copy. 132 */ 133 return H_FUNCTION; 134 } 135 136 static void nested_save_state(struct nested_ppc_state *save, PowerPCCPU *cpu) 137 { 138 CPUPPCState *env = &cpu->env; 139 SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); 140 141 memcpy(save->gpr, env->gpr, sizeof(save->gpr)); 142 143 save->lr = env->lr; 144 save->ctr = env->ctr; 145 save->cfar = env->cfar; 146 save->msr = env->msr; 147 save->nip = env->nip; 148 149 save->cr = ppc_get_cr(env); 150 save->xer = cpu_read_xer(env); 151 152 save->lpcr = env->spr[SPR_LPCR]; 153 save->lpidr = env->spr[SPR_LPIDR]; 154 save->pcr = env->spr[SPR_PCR]; 155 save->dpdes = env->spr[SPR_DPDES]; 156 save->hfscr = env->spr[SPR_HFSCR]; 157 save->srr0 = env->spr[SPR_SRR0]; 158 save->srr1 = env->spr[SPR_SRR1]; 159 save->sprg0 = env->spr[SPR_SPRG0]; 160 save->sprg1 = env->spr[SPR_SPRG1]; 161 save->sprg2 = env->spr[SPR_SPRG2]; 162 save->sprg3 = env->spr[SPR_SPRG3]; 163 save->pidr = env->spr[SPR_BOOKS_PID]; 164 save->ppr = env->spr[SPR_PPR]; 165 166 if (spapr_nested_api(spapr) == NESTED_API_PAPR) { 167 save->amor = env->spr[SPR_AMOR]; 168 save->dawr0 = env->spr[SPR_DAWR0]; 169 save->dawrx0 = env->spr[SPR_DAWRX0]; 170 save->ciabr = env->spr[SPR_CIABR]; 171 save->purr = env->spr[SPR_PURR]; 172 save->spurr = env->spr[SPR_SPURR]; 173 save->ic = env->spr[SPR_IC]; 174 save->vtb = env->spr[SPR_VTB]; 175 save->hdar = env->spr[SPR_HDAR]; 176 save->hdsisr = env->spr[SPR_HDSISR]; 177 save->heir = env->spr[SPR_HEIR]; 178 save->asdr = env->spr[SPR_ASDR]; 179 save->dawr1 = env->spr[SPR_DAWR1]; 180 save->dawrx1 = env->spr[SPR_DAWRX1]; 181 save->dexcr = env->spr[SPR_DEXCR]; 182 save->hdexcr = env->spr[SPR_HDEXCR]; 183 save->hashkeyr = env->spr[SPR_HASHKEYR]; 184 save->hashpkeyr = env->spr[SPR_HASHPKEYR]; 185 memcpy(save->vsr, env->vsr, sizeof(save->vsr)); 186 save->ebbhr = env->spr[SPR_EBBHR]; 187 save->tar = env->spr[SPR_TAR]; 188 save->ebbrr = env->spr[SPR_EBBRR]; 189 save->bescr = env->spr[SPR_BESCR]; 190 save->iamr = env->spr[SPR_IAMR]; 191 save->amr = env->spr[SPR_AMR]; 192 save->uamor = env->spr[SPR_UAMOR]; 193 save->dscr = env->spr[SPR_DSCR]; 194 save->fscr = env->spr[SPR_FSCR]; 195 save->pspb = env->spr[SPR_PSPB]; 196 save->ctrl = env->spr[SPR_CTRL]; 197 save->vrsave = env->spr[SPR_VRSAVE]; 198 save->dar = env->spr[SPR_DAR]; 199 save->dsisr = env->spr[SPR_DSISR]; 200 save->pmc1 = env->spr[SPR_POWER_PMC1]; 201 save->pmc2 = env->spr[SPR_POWER_PMC2]; 202 save->pmc3 = env->spr[SPR_POWER_PMC3]; 203 save->pmc4 = env->spr[SPR_POWER_PMC4]; 204 save->pmc5 = env->spr[SPR_POWER_PMC5]; 205 save->pmc6 = env->spr[SPR_POWER_PMC6]; 206 save->mmcr0 = env->spr[SPR_POWER_MMCR0]; 207 save->mmcr1 = env->spr[SPR_POWER_MMCR1]; 208 save->mmcr2 = env->spr[SPR_POWER_MMCR2]; 209 save->mmcra = env->spr[SPR_POWER_MMCRA]; 210 save->sdar = env->spr[SPR_POWER_SDAR]; 211 save->siar = env->spr[SPR_POWER_SIAR]; 212 save->sier = env->spr[SPR_POWER_SIER]; 213 save->vscr = ppc_get_vscr(env); 214 save->fpscr = env->fpscr; 215 } else if (spapr_nested_api(spapr) == NESTED_API_KVM_HV) { 216 save->tb_offset = env->tb_env->tb_offset; 217 } 218 } 219 220 static void nested_post_load_state(CPUPPCState *env, CPUState *cs) 221 { 222 /* 223 * compute hflags and possible interrupts. 224 */ 225 hreg_compute_hflags(env); 226 ppc_maybe_interrupt(env); 227 /* 228 * Nested HV does not tag TLB entries between L1 and L2, so must 229 * flush on transition. 230 */ 231 tlb_flush(cs); 232 env->reserve_addr = -1; /* Reset the reservation */ 233 } 234 235 static void nested_load_state(PowerPCCPU *cpu, struct nested_ppc_state *load) 236 { 237 CPUPPCState *env = &cpu->env; 238 SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); 239 240 memcpy(env->gpr, load->gpr, sizeof(env->gpr)); 241 242 env->lr = load->lr; 243 env->ctr = load->ctr; 244 env->cfar = load->cfar; 245 env->msr = load->msr; 246 env->nip = load->nip; 247 248 ppc_set_cr(env, load->cr); 249 cpu_write_xer(env, load->xer); 250 251 env->spr[SPR_LPCR] = load->lpcr; 252 env->spr[SPR_LPIDR] = load->lpidr; 253 env->spr[SPR_PCR] = load->pcr; 254 env->spr[SPR_DPDES] = load->dpdes; 255 env->spr[SPR_HFSCR] = load->hfscr; 256 env->spr[SPR_SRR0] = load->srr0; 257 env->spr[SPR_SRR1] = load->srr1; 258 env->spr[SPR_SPRG0] = load->sprg0; 259 env->spr[SPR_SPRG1] = load->sprg1; 260 env->spr[SPR_SPRG2] = load->sprg2; 261 env->spr[SPR_SPRG3] = load->sprg3; 262 env->spr[SPR_BOOKS_PID] = load->pidr; 263 env->spr[SPR_PPR] = load->ppr; 264 265 if (spapr_nested_api(spapr) == NESTED_API_PAPR) { 266 env->spr[SPR_AMOR] = load->amor; 267 env->spr[SPR_DAWR0] = load->dawr0; 268 env->spr[SPR_DAWRX0] = load->dawrx0; 269 env->spr[SPR_CIABR] = load->ciabr; 270 env->spr[SPR_PURR] = load->purr; 271 env->spr[SPR_SPURR] = load->purr; 272 env->spr[SPR_IC] = load->ic; 273 env->spr[SPR_VTB] = load->vtb; 274 env->spr[SPR_HDAR] = load->hdar; 275 env->spr[SPR_HDSISR] = load->hdsisr; 276 env->spr[SPR_HEIR] = load->heir; 277 env->spr[SPR_ASDR] = load->asdr; 278 env->spr[SPR_DAWR1] = load->dawr1; 279 env->spr[SPR_DAWRX1] = load->dawrx1; 280 env->spr[SPR_DEXCR] = load->dexcr; 281 env->spr[SPR_HDEXCR] = load->hdexcr; 282 env->spr[SPR_HASHKEYR] = load->hashkeyr; 283 env->spr[SPR_HASHPKEYR] = load->hashpkeyr; 284 memcpy(env->vsr, load->vsr, sizeof(env->vsr)); 285 env->spr[SPR_EBBHR] = load->ebbhr; 286 env->spr[SPR_TAR] = load->tar; 287 env->spr[SPR_EBBRR] = load->ebbrr; 288 env->spr[SPR_BESCR] = load->bescr; 289 env->spr[SPR_IAMR] = load->iamr; 290 env->spr[SPR_AMR] = load->amr; 291 env->spr[SPR_UAMOR] = load->uamor; 292 env->spr[SPR_DSCR] = load->dscr; 293 env->spr[SPR_FSCR] = load->fscr; 294 env->spr[SPR_PSPB] = load->pspb; 295 env->spr[SPR_CTRL] = load->ctrl; 296 env->spr[SPR_VRSAVE] = load->vrsave; 297 env->spr[SPR_DAR] = load->dar; 298 env->spr[SPR_DSISR] = load->dsisr; 299 env->spr[SPR_POWER_PMC1] = load->pmc1; 300 env->spr[SPR_POWER_PMC2] = load->pmc2; 301 env->spr[SPR_POWER_PMC3] = load->pmc3; 302 env->spr[SPR_POWER_PMC4] = load->pmc4; 303 env->spr[SPR_POWER_PMC5] = load->pmc5; 304 env->spr[SPR_POWER_PMC6] = load->pmc6; 305 env->spr[SPR_POWER_MMCR0] = load->mmcr0; 306 env->spr[SPR_POWER_MMCR1] = load->mmcr1; 307 env->spr[SPR_POWER_MMCR2] = load->mmcr2; 308 env->spr[SPR_POWER_MMCRA] = load->mmcra; 309 env->spr[SPR_POWER_SDAR] = load->sdar; 310 env->spr[SPR_POWER_SIAR] = load->siar; 311 env->spr[SPR_POWER_SIER] = load->sier; 312 ppc_store_vscr(env, load->vscr); 313 ppc_store_fpscr(env, load->fpscr); 314 } else if (spapr_nested_api(spapr) == NESTED_API_KVM_HV) { 315 env->tb_env->tb_offset = load->tb_offset; 316 } 317 } 318 319 /* 320 * When this handler returns, the environment is switched to the L2 guest 321 * and TCG begins running that. spapr_exit_nested() performs the switch from 322 * L2 back to L1 and returns from the H_ENTER_NESTED hcall. 323 */ 324 static target_ulong h_enter_nested(PowerPCCPU *cpu, 325 SpaprMachineState *spapr, 326 target_ulong opcode, 327 target_ulong *args) 328 { 329 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); 330 CPUPPCState *env = &cpu->env; 331 CPUState *cs = CPU(cpu); 332 SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); 333 struct nested_ppc_state l2_state; 334 target_ulong hv_ptr = args[0]; 335 target_ulong regs_ptr = args[1]; 336 target_ulong hdec, now = cpu_ppc_load_tbl(env); 337 target_ulong lpcr, lpcr_mask; 338 struct kvmppc_hv_guest_state *hvstate; 339 struct kvmppc_hv_guest_state hv_state; 340 struct kvmppc_pt_regs *regs; 341 hwaddr len; 342 343 if (spapr->nested.ptcr == 0) { 344 return H_NOT_AVAILABLE; 345 } 346 347 len = sizeof(*hvstate); 348 hvstate = address_space_map(CPU(cpu)->as, hv_ptr, &len, false, 349 MEMTXATTRS_UNSPECIFIED); 350 if (len != sizeof(*hvstate)) { 351 address_space_unmap(CPU(cpu)->as, hvstate, len, 0, false); 352 return H_PARAMETER; 353 } 354 355 memcpy(&hv_state, hvstate, len); 356 357 address_space_unmap(CPU(cpu)->as, hvstate, len, len, false); 358 359 /* 360 * We accept versions 1 and 2. Version 2 fields are unused because TCG 361 * does not implement DAWR*. 362 */ 363 if (hv_state.version > HV_GUEST_STATE_VERSION) { 364 return H_PARAMETER; 365 } 366 367 if (hv_state.lpid == 0) { 368 return H_PARAMETER; 369 } 370 371 spapr_cpu->nested_host_state = g_try_new(struct nested_ppc_state, 1); 372 if (!spapr_cpu->nested_host_state) { 373 return H_NO_MEM; 374 } 375 376 assert(env->spr[SPR_LPIDR] == 0); 377 assert(env->spr[SPR_DPDES] == 0); 378 nested_save_state(spapr_cpu->nested_host_state, cpu); 379 380 len = sizeof(*regs); 381 regs = address_space_map(CPU(cpu)->as, regs_ptr, &len, false, 382 MEMTXATTRS_UNSPECIFIED); 383 if (!regs || len != sizeof(*regs)) { 384 address_space_unmap(CPU(cpu)->as, regs, len, 0, false); 385 g_free(spapr_cpu->nested_host_state); 386 return H_P2; 387 } 388 389 len = sizeof(l2_state.gpr); 390 assert(len == sizeof(regs->gpr)); 391 memcpy(l2_state.gpr, regs->gpr, len); 392 393 l2_state.lr = regs->link; 394 l2_state.ctr = regs->ctr; 395 l2_state.xer = regs->xer; 396 l2_state.cr = regs->ccr; 397 l2_state.msr = regs->msr; 398 l2_state.nip = regs->nip; 399 400 address_space_unmap(CPU(cpu)->as, regs, len, len, false); 401 402 l2_state.cfar = hv_state.cfar; 403 l2_state.lpidr = hv_state.lpid; 404 405 lpcr_mask = LPCR_DPFD | LPCR_ILE | LPCR_AIL | LPCR_LD | LPCR_MER; 406 lpcr = (env->spr[SPR_LPCR] & ~lpcr_mask) | (hv_state.lpcr & lpcr_mask); 407 lpcr |= LPCR_HR | LPCR_UPRT | LPCR_GTSE | LPCR_HVICE | LPCR_HDICE; 408 lpcr &= ~LPCR_LPES0; 409 l2_state.lpcr = lpcr & pcc->lpcr_mask; 410 411 l2_state.pcr = hv_state.pcr; 412 /* hv_state.amor is not used */ 413 l2_state.dpdes = hv_state.dpdes; 414 l2_state.hfscr = hv_state.hfscr; 415 /* TCG does not implement DAWR*, CIABR, PURR, SPURR, IC, VTB, HEIR SPRs*/ 416 l2_state.srr0 = hv_state.srr0; 417 l2_state.srr1 = hv_state.srr1; 418 l2_state.sprg0 = hv_state.sprg[0]; 419 l2_state.sprg1 = hv_state.sprg[1]; 420 l2_state.sprg2 = hv_state.sprg[2]; 421 l2_state.sprg3 = hv_state.sprg[3]; 422 l2_state.pidr = hv_state.pidr; 423 l2_state.ppr = hv_state.ppr; 424 l2_state.tb_offset = env->tb_env->tb_offset + hv_state.tb_offset; 425 426 /* 427 * Switch to the nested guest environment and start the "hdec" timer. 428 */ 429 nested_load_state(cpu, &l2_state); 430 nested_post_load_state(env, cs); 431 432 hdec = hv_state.hdec_expiry - now; 433 cpu_ppc_hdecr_init(env); 434 cpu_ppc_store_hdecr(env, hdec); 435 436 /* 437 * The hv_state.vcpu_token is not needed. It is used by the KVM 438 * implementation to remember which L2 vCPU last ran on which physical 439 * CPU so as to invalidate process scope translations if it is moved 440 * between physical CPUs. For now TLBs are always flushed on L1<->L2 441 * transitions so this is not a problem. 442 * 443 * Could validate that the same vcpu_token does not attempt to run on 444 * different L1 vCPUs at the same time, but that would be a L1 KVM bug 445 * and it's not obviously worth a new data structure to do it. 446 */ 447 448 spapr_cpu->in_nested = true; 449 450 /* 451 * The spapr hcall helper sets env->gpr[3] to the return value, but at 452 * this point the L1 is not returning from the hcall but rather we 453 * start running the L2, so r3 must not be clobbered, so return env->gpr[3] 454 * to leave it unchanged. 455 */ 456 return env->gpr[3]; 457 } 458 459 static void spapr_exit_nested_hv(PowerPCCPU *cpu, int excp) 460 { 461 CPUPPCState *env = &cpu->env; 462 CPUState *cs = CPU(cpu); 463 SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); 464 struct nested_ppc_state l2_state; 465 target_ulong hv_ptr = spapr_cpu->nested_host_state->gpr[4]; 466 target_ulong regs_ptr = spapr_cpu->nested_host_state->gpr[5]; 467 target_ulong hsrr0, hsrr1, hdar, asdr, hdsisr; 468 struct kvmppc_hv_guest_state *hvstate; 469 struct kvmppc_pt_regs *regs; 470 hwaddr len; 471 472 nested_save_state(&l2_state, cpu); 473 hsrr0 = env->spr[SPR_HSRR0]; 474 hsrr1 = env->spr[SPR_HSRR1]; 475 hdar = env->spr[SPR_HDAR]; 476 hdsisr = env->spr[SPR_HDSISR]; 477 asdr = env->spr[SPR_ASDR]; 478 479 /* 480 * Switch back to the host environment (including for any error). 481 */ 482 assert(env->spr[SPR_LPIDR] != 0); 483 nested_load_state(cpu, spapr_cpu->nested_host_state); 484 nested_post_load_state(env, cs); 485 env->gpr[3] = env->excp_vectors[excp]; /* hcall return value */ 486 487 cpu_ppc_hdecr_exit(env); 488 489 spapr_cpu->in_nested = false; 490 491 g_free(spapr_cpu->nested_host_state); 492 spapr_cpu->nested_host_state = NULL; 493 494 len = sizeof(*hvstate); 495 hvstate = address_space_map(CPU(cpu)->as, hv_ptr, &len, true, 496 MEMTXATTRS_UNSPECIFIED); 497 if (len != sizeof(*hvstate)) { 498 address_space_unmap(CPU(cpu)->as, hvstate, len, 0, true); 499 env->gpr[3] = H_PARAMETER; 500 return; 501 } 502 503 hvstate->cfar = l2_state.cfar; 504 hvstate->lpcr = l2_state.lpcr; 505 hvstate->pcr = l2_state.pcr; 506 hvstate->dpdes = l2_state.dpdes; 507 hvstate->hfscr = l2_state.hfscr; 508 509 if (excp == POWERPC_EXCP_HDSI) { 510 hvstate->hdar = hdar; 511 hvstate->hdsisr = hdsisr; 512 hvstate->asdr = asdr; 513 } else if (excp == POWERPC_EXCP_HISI) { 514 hvstate->asdr = asdr; 515 } 516 517 /* HEIR should be implemented for HV mode and saved here. */ 518 hvstate->srr0 = l2_state.srr0; 519 hvstate->srr1 = l2_state.srr1; 520 hvstate->sprg[0] = l2_state.sprg0; 521 hvstate->sprg[1] = l2_state.sprg1; 522 hvstate->sprg[2] = l2_state.sprg2; 523 hvstate->sprg[3] = l2_state.sprg3; 524 hvstate->pidr = l2_state.pidr; 525 hvstate->ppr = l2_state.ppr; 526 527 /* Is it okay to specify write length larger than actual data written? */ 528 address_space_unmap(CPU(cpu)->as, hvstate, len, len, true); 529 530 len = sizeof(*regs); 531 regs = address_space_map(CPU(cpu)->as, regs_ptr, &len, true, 532 MEMTXATTRS_UNSPECIFIED); 533 if (!regs || len != sizeof(*regs)) { 534 address_space_unmap(CPU(cpu)->as, regs, len, 0, true); 535 env->gpr[3] = H_P2; 536 return; 537 } 538 539 len = sizeof(env->gpr); 540 assert(len == sizeof(regs->gpr)); 541 memcpy(regs->gpr, l2_state.gpr, len); 542 543 regs->link = l2_state.lr; 544 regs->ctr = l2_state.ctr; 545 regs->xer = l2_state.xer; 546 regs->ccr = l2_state.cr; 547 548 if (excp == POWERPC_EXCP_MCHECK || 549 excp == POWERPC_EXCP_RESET || 550 excp == POWERPC_EXCP_SYSCALL) { 551 regs->nip = l2_state.srr0; 552 regs->msr = l2_state.srr1 & env->msr_mask; 553 } else { 554 regs->nip = hsrr0; 555 regs->msr = hsrr1 & env->msr_mask; 556 } 557 558 /* Is it okay to specify write length larger than actual data written? */ 559 address_space_unmap(CPU(cpu)->as, regs, len, len, true); 560 } 561 562 static bool spapr_nested_vcpu_check(SpaprMachineStateNestedGuest *guest, 563 target_ulong vcpuid, bool inoutbuf) 564 { 565 struct SpaprMachineStateNestedGuestVcpu *vcpu; 566 /* 567 * Perform sanity checks for the provided vcpuid of a guest. 568 * For now, ensure its valid, allocated and enabled for use. 569 */ 570 571 if (vcpuid >= PAPR_NESTED_GUEST_VCPU_MAX) { 572 return false; 573 } 574 575 if (!(vcpuid < guest->nr_vcpus)) { 576 return false; 577 } 578 579 vcpu = &guest->vcpus[vcpuid]; 580 if (!vcpu->enabled) { 581 return false; 582 } 583 584 if (!inoutbuf) { 585 return true; 586 } 587 588 /* Check to see if the in/out buffers are registered */ 589 if (vcpu->runbufin.addr && vcpu->runbufout.addr) { 590 return true; 591 } 592 593 return false; 594 } 595 596 static void *get_vcpu_state_ptr(SpaprMachineStateNestedGuest *guest, 597 target_ulong vcpuid) 598 { 599 assert(spapr_nested_vcpu_check(guest, vcpuid, false)); 600 return &guest->vcpus[vcpuid].state; 601 } 602 603 static void *get_vcpu_ptr(SpaprMachineStateNestedGuest *guest, 604 target_ulong vcpuid) 605 { 606 assert(spapr_nested_vcpu_check(guest, vcpuid, false)); 607 return &guest->vcpus[vcpuid]; 608 } 609 610 static void *get_guest_ptr(SpaprMachineStateNestedGuest *guest, 611 target_ulong vcpuid) 612 { 613 return guest; /* for GSBE_NESTED */ 614 } 615 616 /* 617 * set=1 means the L1 is trying to set some state 618 * set=0 means the L1 is trying to get some state 619 */ 620 static void copy_state_8to8(void *a, void *b, bool set) 621 { 622 /* set takes from the Big endian element_buf and sets internal buffer */ 623 624 if (set) { 625 *(uint64_t *)a = be64_to_cpu(*(uint64_t *)b); 626 } else { 627 *(uint64_t *)b = cpu_to_be64(*(uint64_t *)a); 628 } 629 } 630 631 static void copy_state_4to4(void *a, void *b, bool set) 632 { 633 if (set) { 634 *(uint32_t *)a = be32_to_cpu(*(uint32_t *)b); 635 } else { 636 *(uint32_t *)b = cpu_to_be32(*((uint32_t *)a)); 637 } 638 } 639 640 static void copy_state_16to16(void *a, void *b, bool set) 641 { 642 uint64_t *src, *dst; 643 644 if (set) { 645 src = b; 646 dst = a; 647 648 dst[1] = be64_to_cpu(src[0]); 649 dst[0] = be64_to_cpu(src[1]); 650 } else { 651 src = a; 652 dst = b; 653 654 dst[1] = cpu_to_be64(src[0]); 655 dst[0] = cpu_to_be64(src[1]); 656 } 657 } 658 659 static void copy_state_4to8(void *a, void *b, bool set) 660 { 661 if (set) { 662 *(uint64_t *)a = (uint64_t) be32_to_cpu(*(uint32_t *)b); 663 } else { 664 *(uint32_t *)b = cpu_to_be32((uint32_t) (*((uint64_t *)a))); 665 } 666 } 667 668 static void copy_state_pagetbl(void *a, void *b, bool set) 669 { 670 uint64_t *pagetbl; 671 uint64_t *buf; /* 3 double words */ 672 uint64_t rts; 673 674 assert(set); 675 676 pagetbl = a; 677 buf = b; 678 679 *pagetbl = be64_to_cpu(buf[0]); 680 /* as per ISA section 6.7.6.1 */ 681 *pagetbl |= PATE0_HR; /* Host Radix bit is 1 */ 682 683 /* RTS */ 684 rts = be64_to_cpu(buf[1]); 685 assert(rts == 52); 686 rts = rts - 31; /* since radix tree size = 2^(RTS+31) */ 687 *pagetbl |= ((rts & 0x7) << 5); /* RTS2 is bit 56:58 */ 688 *pagetbl |= (((rts >> 3) & 0x3) << 61); /* RTS1 is bit 1:2 */ 689 690 /* RPDS {Size = 2^(RPDS+3) , RPDS >=5} */ 691 *pagetbl |= 63 - clz64(be64_to_cpu(buf[2])) - 3; 692 } 693 694 static void copy_state_proctbl(void *a, void *b, bool set) 695 { 696 uint64_t *proctbl; 697 uint64_t *buf; /* 2 double words */ 698 699 assert(set); 700 701 proctbl = a; 702 buf = b; 703 /* PRTB: Process Table Base */ 704 *proctbl = be64_to_cpu(buf[0]); 705 /* PRTS: Process Table Size = 2^(12+PRTS) */ 706 if (be64_to_cpu(buf[1]) == (1ULL << 12)) { 707 *proctbl |= 0; 708 } else if (be64_to_cpu(buf[1]) == (1ULL << 24)) { 709 *proctbl |= 12; 710 } else { 711 g_assert_not_reached(); 712 } 713 } 714 715 static void copy_state_runbuf(void *a, void *b, bool set) 716 { 717 uint64_t *buf; /* 2 double words */ 718 struct SpaprMachineStateNestedGuestVcpuRunBuf *runbuf; 719 720 assert(set); 721 722 runbuf = a; 723 buf = b; 724 725 runbuf->addr = be64_to_cpu(buf[0]); 726 assert(runbuf->addr); 727 728 /* per spec */ 729 assert(be64_to_cpu(buf[1]) <= 16384); 730 731 /* 732 * This will also hit in the input buffer but should be fine for 733 * now. If not we can split this function. 734 */ 735 assert(be64_to_cpu(buf[1]) >= VCPU_OUT_BUF_MIN_SZ); 736 737 runbuf->size = be64_to_cpu(buf[1]); 738 } 739 740 /* tell the L1 how big we want the output vcpu run buffer */ 741 static void out_buf_min_size(void *a, void *b, bool set) 742 { 743 uint64_t *buf; /* 1 double word */ 744 745 assert(!set); 746 747 buf = b; 748 749 buf[0] = cpu_to_be64(VCPU_OUT_BUF_MIN_SZ); 750 } 751 752 static void copy_logical_pvr(void *a, void *b, bool set) 753 { 754 SpaprMachineStateNestedGuest *guest; 755 uint32_t *buf; /* 1 word */ 756 uint32_t *pvr_logical_ptr; 757 uint32_t pvr_logical; 758 target_ulong pcr = 0; 759 760 pvr_logical_ptr = a; 761 buf = b; 762 763 if (!set) { 764 buf[0] = cpu_to_be32(*pvr_logical_ptr); 765 return; 766 } 767 768 pvr_logical = be32_to_cpu(buf[0]); 769 770 *pvr_logical_ptr = pvr_logical; 771 772 if (*pvr_logical_ptr) { 773 switch (*pvr_logical_ptr) { 774 case CPU_POWERPC_LOGICAL_3_10: 775 pcr = PCR_COMPAT_3_10 | PCR_COMPAT_3_00; 776 break; 777 case CPU_POWERPC_LOGICAL_3_00: 778 pcr = PCR_COMPAT_3_00; 779 break; 780 default: 781 qemu_log_mask(LOG_GUEST_ERROR, 782 "Could not set PCR for LPVR=0x%08x\n", 783 *pvr_logical_ptr); 784 return; 785 } 786 } 787 788 guest = container_of(pvr_logical_ptr, 789 struct SpaprMachineStateNestedGuest, 790 pvr_logical); 791 for (int i = 0; i < guest->nr_vcpus; i++) { 792 guest->vcpus[i].state.pcr = ~pcr | HVMASK_PCR; 793 } 794 } 795 796 static void copy_tb_offset(void *a, void *b, bool set) 797 { 798 SpaprMachineStateNestedGuest *guest; 799 uint64_t *buf; /* 1 double word */ 800 uint64_t *tb_offset_ptr; 801 uint64_t tb_offset; 802 803 tb_offset_ptr = a; 804 buf = b; 805 806 if (!set) { 807 buf[0] = cpu_to_be64(*tb_offset_ptr); 808 return; 809 } 810 811 tb_offset = be64_to_cpu(buf[0]); 812 /* need to copy this to the individual tb_offset for each vcpu */ 813 guest = container_of(tb_offset_ptr, 814 struct SpaprMachineStateNestedGuest, 815 tb_offset); 816 for (int i = 0; i < guest->nr_vcpus; i++) { 817 guest->vcpus[i].tb_offset = tb_offset; 818 } 819 } 820 821 static void copy_state_hdecr(void *a, void *b, bool set) 822 { 823 uint64_t *buf; /* 1 double word */ 824 uint64_t *hdecr_expiry_tb; 825 826 hdecr_expiry_tb = a; 827 buf = b; 828 829 if (!set) { 830 buf[0] = cpu_to_be64(*hdecr_expiry_tb); 831 return; 832 } 833 834 *hdecr_expiry_tb = be64_to_cpu(buf[0]); 835 } 836 837 struct guest_state_element_type guest_state_element_types[] = { 838 GUEST_STATE_ELEMENT_NOP(GSB_HV_VCPU_IGNORED_ID, 0), 839 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR0, gpr[0]), 840 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR1, gpr[1]), 841 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR2, gpr[2]), 842 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR3, gpr[3]), 843 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR4, gpr[4]), 844 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR5, gpr[5]), 845 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR6, gpr[6]), 846 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR7, gpr[7]), 847 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR8, gpr[8]), 848 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR9, gpr[9]), 849 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR10, gpr[10]), 850 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR11, gpr[11]), 851 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR12, gpr[12]), 852 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR13, gpr[13]), 853 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR14, gpr[14]), 854 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR15, gpr[15]), 855 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR16, gpr[16]), 856 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR17, gpr[17]), 857 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR18, gpr[18]), 858 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR19, gpr[19]), 859 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR20, gpr[20]), 860 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR21, gpr[21]), 861 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR22, gpr[22]), 862 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR23, gpr[23]), 863 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR24, gpr[24]), 864 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR25, gpr[25]), 865 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR26, gpr[26]), 866 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR27, gpr[27]), 867 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR28, gpr[28]), 868 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR29, gpr[29]), 869 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR30, gpr[30]), 870 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR31, gpr[31]), 871 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_NIA, nip), 872 GSE_ENV_DWM(GSB_VCPU_SPR_MSR, msr, HVMASK_MSR), 873 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CTR, ctr), 874 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_LR, lr), 875 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_XER, xer), 876 GUEST_STATE_ELEMENT_ENV_WW(GSB_VCPU_SPR_CR, cr), 877 GUEST_STATE_ELEMENT_NOP_DW(GSB_VCPU_SPR_MMCR3), 878 GUEST_STATE_ELEMENT_NOP_DW(GSB_VCPU_SPR_SIER2), 879 GUEST_STATE_ELEMENT_NOP_DW(GSB_VCPU_SPR_SIER3), 880 GUEST_STATE_ELEMENT_NOP_W(GSB_VCPU_SPR_WORT), 881 GSE_ENV_DWM(GSB_VCPU_SPR_LPCR, lpcr, HVMASK_LPCR), 882 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_AMOR, amor), 883 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_HFSCR, hfscr), 884 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DAWR0, dawr0), 885 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_DAWRX0, dawrx0), 886 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CIABR, ciabr), 887 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_PURR, purr), 888 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPURR, spurr), 889 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_IC, ic), 890 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_VTB, vtb), 891 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_HDAR, hdar), 892 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_HDSISR, hdsisr), 893 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_HEIR, heir), 894 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_ASDR, asdr), 895 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SRR0, srr0), 896 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SRR1, srr1), 897 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPRG0, sprg0), 898 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPRG1, sprg1), 899 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPRG2, sprg2), 900 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPRG3, sprg3), 901 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PIDR, pidr), 902 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CFAR, cfar), 903 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_PPR, ppr), 904 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DAWR1, dawr1), 905 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_DAWRX1, dawrx1), 906 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DEXCR, dexcr), 907 GSE_ENV_DWM(GSB_VCPU_SPR_HDEXCR, hdexcr, HVMASK_HDEXCR), 908 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_HASHKEYR, hashkeyr), 909 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_HASHPKEYR, hashpkeyr), 910 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR0, vsr[0]), 911 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR1, vsr[1]), 912 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR2, vsr[2]), 913 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR3, vsr[3]), 914 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR4, vsr[4]), 915 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR5, vsr[5]), 916 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR6, vsr[6]), 917 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR7, vsr[7]), 918 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR8, vsr[8]), 919 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR9, vsr[9]), 920 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR10, vsr[10]), 921 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR11, vsr[11]), 922 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR12, vsr[12]), 923 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR13, vsr[13]), 924 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR14, vsr[14]), 925 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR15, vsr[15]), 926 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR16, vsr[16]), 927 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR17, vsr[17]), 928 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR18, vsr[18]), 929 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR19, vsr[19]), 930 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR20, vsr[20]), 931 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR21, vsr[21]), 932 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR22, vsr[22]), 933 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR23, vsr[23]), 934 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR24, vsr[24]), 935 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR25, vsr[25]), 936 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR26, vsr[26]), 937 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR27, vsr[27]), 938 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR28, vsr[28]), 939 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR29, vsr[29]), 940 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR30, vsr[30]), 941 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR31, vsr[31]), 942 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR32, vsr[32]), 943 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR33, vsr[33]), 944 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR34, vsr[34]), 945 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR35, vsr[35]), 946 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR36, vsr[36]), 947 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR37, vsr[37]), 948 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR38, vsr[38]), 949 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR39, vsr[39]), 950 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR40, vsr[40]), 951 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR41, vsr[41]), 952 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR42, vsr[42]), 953 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR43, vsr[43]), 954 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR44, vsr[44]), 955 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR45, vsr[45]), 956 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR46, vsr[46]), 957 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR47, vsr[47]), 958 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR48, vsr[48]), 959 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR49, vsr[49]), 960 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR50, vsr[50]), 961 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR51, vsr[51]), 962 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR52, vsr[52]), 963 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR53, vsr[53]), 964 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR54, vsr[54]), 965 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR55, vsr[55]), 966 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR56, vsr[56]), 967 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR57, vsr[57]), 968 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR58, vsr[58]), 969 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR59, vsr[59]), 970 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR60, vsr[60]), 971 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR61, vsr[61]), 972 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR62, vsr[62]), 973 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR63, vsr[63]), 974 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_EBBHR, ebbhr), 975 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_TAR, tar), 976 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_EBBRR, ebbrr), 977 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_BESCR, bescr), 978 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_IAMR, iamr), 979 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_AMR, amr), 980 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_UAMOR, uamor), 981 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DSCR, dscr), 982 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_FSCR, fscr), 983 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PSPB, pspb), 984 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CTRL, ctrl), 985 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_VRSAVE, vrsave), 986 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DAR, dar), 987 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_DSISR, dsisr), 988 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC1, pmc1), 989 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC2, pmc2), 990 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC3, pmc3), 991 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC4, pmc4), 992 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC5, pmc5), 993 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC6, pmc6), 994 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_MMCR0, mmcr0), 995 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_MMCR1, mmcr1), 996 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_MMCR2, mmcr2), 997 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_MMCRA, mmcra), 998 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SDAR , sdar), 999 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SIAR , siar), 1000 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SIER , sier), 1001 GUEST_STATE_ELEMENT_ENV_WW(GSB_VCPU_SPR_VSCR, vscr), 1002 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_FPSCR, fpscr), 1003 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_DEC_EXPIRE_TB, dec_expiry_tb), 1004 GSBE_NESTED(GSB_PART_SCOPED_PAGETBL, 0x18, parttbl[0], copy_state_pagetbl), 1005 GSBE_NESTED(GSB_PROCESS_TBL, 0x10, parttbl[1], copy_state_proctbl), 1006 GSBE_NESTED(GSB_VCPU_LPVR, 0x4, pvr_logical, copy_logical_pvr), 1007 GSBE_NESTED_MSK(GSB_TB_OFFSET, 0x8, tb_offset, copy_tb_offset, 1008 HVMASK_TB_OFFSET), 1009 GSBE_NESTED_VCPU(GSB_VCPU_IN_BUFFER, 0x10, runbufin, copy_state_runbuf), 1010 GSBE_NESTED_VCPU(GSB_VCPU_OUT_BUFFER, 0x10, runbufout, copy_state_runbuf), 1011 GSBE_NESTED_VCPU(GSB_VCPU_OUT_BUF_MIN_SZ, 0x8, runbufout, out_buf_min_size), 1012 GSBE_NESTED_VCPU(GSB_VCPU_HDEC_EXPIRY_TB, 0x8, hdecr_expiry_tb, 1013 copy_state_hdecr) 1014 }; 1015 1016 void spapr_nested_gsb_init(void) 1017 { 1018 struct guest_state_element_type *type; 1019 1020 /* Init the guest state elements lookup table, flags for now */ 1021 for (int i = 0; i < ARRAY_SIZE(guest_state_element_types); i++) { 1022 type = &guest_state_element_types[i]; 1023 1024 assert(type->id <= GSB_LAST); 1025 if (type->id >= GSB_VCPU_SPR_HDAR) 1026 /* 0xf000 - 0xf005 Thread + RO */ 1027 type->flags = GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY; 1028 else if (type->id >= GSB_VCPU_IN_BUFFER) 1029 /* 0x0c00 - 0xf000 Thread + RW */ 1030 type->flags = 0; 1031 else if (type->id >= GSB_VCPU_LPVR) 1032 /* 0x0003 - 0x0bff Guest + RW */ 1033 type->flags = GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE; 1034 else if (type->id >= GSB_HV_VCPU_STATE_SIZE) 1035 /* 0x0001 - 0x0002 Guest + RO */ 1036 type->flags = GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY | 1037 GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE; 1038 } 1039 } 1040 1041 static struct guest_state_element *guest_state_element_next( 1042 struct guest_state_element *element, 1043 int64_t *len, 1044 int64_t *num_elements) 1045 { 1046 uint16_t size; 1047 1048 /* size is of element->value[] only. Not whole guest_state_element */ 1049 size = be16_to_cpu(element->size); 1050 1051 if (len) { 1052 *len -= size + offsetof(struct guest_state_element, value); 1053 } 1054 1055 if (num_elements) { 1056 *num_elements -= 1; 1057 } 1058 1059 return (struct guest_state_element *)(element->value + size); 1060 } 1061 1062 static 1063 struct guest_state_element_type *guest_state_element_type_find(uint16_t id) 1064 { 1065 int i; 1066 1067 for (i = 0; i < ARRAY_SIZE(guest_state_element_types); i++) 1068 if (id == guest_state_element_types[i].id) { 1069 return &guest_state_element_types[i]; 1070 } 1071 1072 return NULL; 1073 } 1074 1075 static void log_element(struct guest_state_element *element, 1076 struct guest_state_request *gsr) 1077 { 1078 qemu_log_mask(LOG_GUEST_ERROR, "h_guest_%s_state id:0x%04x size:0x%04x", 1079 gsr->flags & GUEST_STATE_REQUEST_SET ? "set" : "get", 1080 be16_to_cpu(element->id), be16_to_cpu(element->size)); 1081 qemu_log_mask(LOG_GUEST_ERROR, "buf:0x%016"PRIx64" ...\n", 1082 be64_to_cpu(*(uint64_t *)element->value)); 1083 } 1084 1085 static bool guest_state_request_check(struct guest_state_request *gsr) 1086 { 1087 int64_t num_elements, len = gsr->len; 1088 struct guest_state_buffer *gsb = gsr->gsb; 1089 struct guest_state_element *element; 1090 struct guest_state_element_type *type; 1091 uint16_t id, size; 1092 1093 /* gsb->num_elements = 0 == 32 bits long */ 1094 assert(len >= 4); 1095 1096 num_elements = be32_to_cpu(gsb->num_elements); 1097 element = gsb->elements; 1098 len -= sizeof(gsb->num_elements); 1099 1100 /* Walk the buffer to validate the length */ 1101 while (num_elements) { 1102 1103 id = be16_to_cpu(element->id); 1104 size = be16_to_cpu(element->size); 1105 1106 if (false) { 1107 log_element(element, gsr); 1108 } 1109 /* buffer size too small */ 1110 if (len < 0) { 1111 return false; 1112 } 1113 1114 type = guest_state_element_type_find(id); 1115 if (!type) { 1116 qemu_log_mask(LOG_GUEST_ERROR, "Element ID %04x unknown\n", id); 1117 log_element(element, gsr); 1118 return false; 1119 } 1120 1121 if (id == GSB_HV_VCPU_IGNORED_ID) { 1122 goto next_element; 1123 } 1124 1125 if (size != type->size) { 1126 qemu_log_mask(LOG_GUEST_ERROR, "Size mismatch. Element ID:%04x." 1127 "Size Exp:%i Got:%i\n", id, type->size, size); 1128 log_element(element, gsr); 1129 return false; 1130 } 1131 1132 if ((type->flags & GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY) && 1133 (gsr->flags & GUEST_STATE_REQUEST_SET)) { 1134 qemu_log_mask(LOG_GUEST_ERROR, "Trying to set a read-only Element " 1135 "ID:%04x.\n", id); 1136 return false; 1137 } 1138 1139 if (type->flags & GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE) { 1140 /* guest wide element type */ 1141 if (!(gsr->flags & GUEST_STATE_REQUEST_GUEST_WIDE)) { 1142 qemu_log_mask(LOG_GUEST_ERROR, "trying to set a guest wide " 1143 "Element ID:%04x.\n", id); 1144 return false; 1145 } 1146 } else { 1147 /* thread wide element type */ 1148 if (gsr->flags & GUEST_STATE_REQUEST_GUEST_WIDE) { 1149 qemu_log_mask(LOG_GUEST_ERROR, "trying to set a thread wide " 1150 "Element ID:%04x.\n", id); 1151 return false; 1152 } 1153 } 1154 next_element: 1155 element = guest_state_element_next(element, &len, &num_elements); 1156 1157 } 1158 return true; 1159 } 1160 1161 static bool is_gsr_invalid(struct guest_state_request *gsr, 1162 struct guest_state_element *element, 1163 struct guest_state_element_type *type) 1164 { 1165 if ((gsr->flags & GUEST_STATE_REQUEST_SET) && 1166 (*(uint64_t *)(element->value) & ~(type->mask))) { 1167 log_element(element, gsr); 1168 qemu_log_mask(LOG_GUEST_ERROR, "L1 can't set reserved bits " 1169 "(allowed mask: 0x%08"PRIx64")\n", type->mask); 1170 return true; 1171 } 1172 return false; 1173 } 1174 1175 static target_ulong h_guest_get_capabilities(PowerPCCPU *cpu, 1176 SpaprMachineState *spapr, 1177 target_ulong opcode, 1178 target_ulong *args) 1179 { 1180 CPUPPCState *env = &cpu->env; 1181 target_ulong flags = args[0]; 1182 1183 if (flags) { /* don't handle any flags capabilities for now */ 1184 return H_PARAMETER; 1185 } 1186 1187 /* P10 capabilities */ 1188 if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0, 1189 spapr->max_compat_pvr)) { 1190 env->gpr[4] |= H_GUEST_CAPABILITIES_P10_MODE; 1191 } 1192 1193 /* P9 capabilities */ 1194 if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, 1195 spapr->max_compat_pvr)) { 1196 env->gpr[4] |= H_GUEST_CAPABILITIES_P9_MODE; 1197 } 1198 1199 return H_SUCCESS; 1200 } 1201 1202 static target_ulong h_guest_set_capabilities(PowerPCCPU *cpu, 1203 SpaprMachineState *spapr, 1204 target_ulong opcode, 1205 target_ulong *args) 1206 { 1207 CPUPPCState *env = &cpu->env; 1208 target_ulong flags = args[0]; 1209 target_ulong capabilities = args[1]; 1210 env->gpr[4] = 0; 1211 1212 if (flags) { /* don't handle any flags capabilities for now */ 1213 return H_PARAMETER; 1214 } 1215 1216 if (capabilities & H_GUEST_CAPABILITIES_COPY_MEM) { 1217 env->gpr[4] = 1; 1218 return H_P2; /* isn't supported */ 1219 } 1220 1221 /* 1222 * If there are no capabilities configured, set the R5 to the index of 1223 * the first supported Power Processor Mode 1224 */ 1225 if (!capabilities) { 1226 env->gpr[4] = 1; 1227 1228 /* set R5 to the first supported Power Processor Mode */ 1229 if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0, 1230 spapr->max_compat_pvr)) { 1231 env->gpr[5] = H_GUEST_CAP_P10_MODE_BMAP; 1232 } else if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, 1233 spapr->max_compat_pvr)) { 1234 env->gpr[5] = H_GUEST_CAP_P9_MODE_BMAP; 1235 } 1236 1237 return H_P2; 1238 } 1239 1240 /* 1241 * If an invalid capability is set, R5 should contain the index of the 1242 * invalid capability bit 1243 */ 1244 if (capabilities & ~H_GUEST_CAP_VALID_MASK) { 1245 env->gpr[4] = 1; 1246 1247 /* Set R5 to the index of the invalid capability */ 1248 env->gpr[5] = 63 - ctz64(capabilities); 1249 1250 return H_P2; 1251 } 1252 1253 if (!spapr->nested.capabilities_set) { 1254 spapr->nested.capabilities_set = true; 1255 spapr->nested.pvr_base = env->spr[SPR_PVR]; 1256 return H_SUCCESS; 1257 } else { 1258 return H_STATE; 1259 } 1260 } 1261 1262 static void 1263 destroy_guest_helper(gpointer value) 1264 { 1265 struct SpaprMachineStateNestedGuest *guest = value; 1266 g_free(guest->vcpus); 1267 g_free(guest); 1268 } 1269 1270 static target_ulong h_guest_create(PowerPCCPU *cpu, 1271 SpaprMachineState *spapr, 1272 target_ulong opcode, 1273 target_ulong *args) 1274 { 1275 CPUPPCState *env = &cpu->env; 1276 target_ulong flags = args[0]; 1277 target_ulong continue_token = args[1]; 1278 uint64_t guestid; 1279 int nguests = 0; 1280 struct SpaprMachineStateNestedGuest *guest; 1281 1282 if (flags) { /* don't handle any flags for now */ 1283 return H_UNSUPPORTED_FLAG; 1284 } 1285 1286 if (continue_token != -1) { 1287 return H_P2; 1288 } 1289 1290 if (!spapr->nested.capabilities_set) { 1291 return H_STATE; 1292 } 1293 1294 if (!spapr->nested.guests) { 1295 spapr->nested.guests = g_hash_table_new_full(NULL, 1296 NULL, 1297 NULL, 1298 destroy_guest_helper); 1299 } 1300 1301 nguests = g_hash_table_size(spapr->nested.guests); 1302 1303 if (nguests == PAPR_NESTED_GUEST_MAX) { 1304 return H_NO_MEM; 1305 } 1306 1307 /* Lookup for available guestid */ 1308 for (guestid = 1; guestid < PAPR_NESTED_GUEST_MAX; guestid++) { 1309 if (!(g_hash_table_lookup(spapr->nested.guests, 1310 GINT_TO_POINTER(guestid)))) { 1311 break; 1312 } 1313 } 1314 1315 if (guestid == PAPR_NESTED_GUEST_MAX) { 1316 return H_NO_MEM; 1317 } 1318 1319 guest = g_try_new0(struct SpaprMachineStateNestedGuest, 1); 1320 if (!guest) { 1321 return H_NO_MEM; 1322 } 1323 1324 guest->pvr_logical = spapr->nested.pvr_base; 1325 g_hash_table_insert(spapr->nested.guests, GINT_TO_POINTER(guestid), guest); 1326 env->gpr[4] = guestid; 1327 1328 return H_SUCCESS; 1329 } 1330 1331 static target_ulong h_guest_delete(PowerPCCPU *cpu, 1332 SpaprMachineState *spapr, 1333 target_ulong opcode, 1334 target_ulong *args) 1335 { 1336 target_ulong flags = args[0]; 1337 target_ulong guestid = args[1]; 1338 struct SpaprMachineStateNestedGuest *guest; 1339 1340 /* 1341 * handle flag deleteAllGuests, if set: 1342 * guestid is ignored and all guests are deleted 1343 * 1344 */ 1345 if (flags & ~H_GUEST_DELETE_ALL_FLAG) { 1346 return H_UNSUPPORTED_FLAG; /* other flag bits reserved */ 1347 } else if (flags & H_GUEST_DELETE_ALL_FLAG) { 1348 g_hash_table_destroy(spapr->nested.guests); 1349 return H_SUCCESS; 1350 } 1351 1352 guest = g_hash_table_lookup(spapr->nested.guests, GINT_TO_POINTER(guestid)); 1353 if (!guest) { 1354 return H_P2; 1355 } 1356 1357 g_hash_table_remove(spapr->nested.guests, GINT_TO_POINTER(guestid)); 1358 1359 return H_SUCCESS; 1360 } 1361 1362 static target_ulong h_guest_create_vcpu(PowerPCCPU *cpu, 1363 SpaprMachineState *spapr, 1364 target_ulong opcode, 1365 target_ulong *args) 1366 { 1367 target_ulong flags = args[0]; 1368 target_ulong guestid = args[1]; 1369 target_ulong vcpuid = args[2]; 1370 SpaprMachineStateNestedGuest *guest; 1371 1372 if (flags) { /* don't handle any flags for now */ 1373 return H_UNSUPPORTED_FLAG; 1374 } 1375 1376 guest = spapr_get_nested_guest(spapr, guestid); 1377 if (!guest) { 1378 return H_P2; 1379 } 1380 1381 if (vcpuid < guest->nr_vcpus) { 1382 qemu_log_mask(LOG_UNIMP, "vcpuid " TARGET_FMT_ld " already in use.", 1383 vcpuid); 1384 return H_IN_USE; 1385 } 1386 /* linear vcpuid allocation only */ 1387 assert(vcpuid == guest->nr_vcpus); 1388 1389 if (guest->nr_vcpus >= PAPR_NESTED_GUEST_VCPU_MAX) { 1390 return H_P3; 1391 } 1392 1393 SpaprMachineStateNestedGuestVcpu *vcpus, *curr_vcpu; 1394 vcpus = g_try_renew(struct SpaprMachineStateNestedGuestVcpu, 1395 guest->vcpus, 1396 guest->nr_vcpus + 1); 1397 if (!vcpus) { 1398 return H_NO_MEM; 1399 } 1400 guest->vcpus = vcpus; 1401 curr_vcpu = &vcpus[guest->nr_vcpus]; 1402 memset(curr_vcpu, 0, sizeof(SpaprMachineStateNestedGuestVcpu)); 1403 1404 curr_vcpu->enabled = true; 1405 guest->nr_vcpus++; 1406 1407 return H_SUCCESS; 1408 } 1409 1410 static target_ulong getset_state(SpaprMachineStateNestedGuest *guest, 1411 uint64_t vcpuid, 1412 struct guest_state_request *gsr) 1413 { 1414 void *ptr; 1415 uint16_t id; 1416 struct guest_state_element *element; 1417 struct guest_state_element_type *type; 1418 int64_t lenleft, num_elements; 1419 1420 lenleft = gsr->len; 1421 1422 if (!guest_state_request_check(gsr)) { 1423 return H_P3; 1424 } 1425 1426 num_elements = be32_to_cpu(gsr->gsb->num_elements); 1427 element = gsr->gsb->elements; 1428 /* Process the elements */ 1429 while (num_elements) { 1430 type = NULL; 1431 /* log_element(element, gsr); */ 1432 1433 id = be16_to_cpu(element->id); 1434 if (id == GSB_HV_VCPU_IGNORED_ID) { 1435 goto next_element; 1436 } 1437 1438 type = guest_state_element_type_find(id); 1439 assert(type); 1440 1441 /* Get pointer to guest data to get/set */ 1442 if (type->location && type->copy) { 1443 ptr = type->location(guest, vcpuid); 1444 assert(ptr); 1445 if (!~(type->mask) && is_gsr_invalid(gsr, element, type)) { 1446 return H_INVALID_ELEMENT_VALUE; 1447 } 1448 type->copy(ptr + type->offset, element->value, 1449 gsr->flags & GUEST_STATE_REQUEST_SET ? true : false); 1450 } 1451 1452 next_element: 1453 element = guest_state_element_next(element, &lenleft, &num_elements); 1454 } 1455 1456 return H_SUCCESS; 1457 } 1458 1459 static target_ulong map_and_getset_state(PowerPCCPU *cpu, 1460 SpaprMachineStateNestedGuest *guest, 1461 uint64_t vcpuid, 1462 struct guest_state_request *gsr) 1463 { 1464 target_ulong rc; 1465 int64_t len; 1466 bool is_write; 1467 1468 len = gsr->len; 1469 /* only get_state would require write access to the provided buffer */ 1470 is_write = (gsr->flags & GUEST_STATE_REQUEST_SET) ? false : true; 1471 gsr->gsb = address_space_map(CPU(cpu)->as, gsr->buf, (uint64_t *)&len, 1472 is_write, MEMTXATTRS_UNSPECIFIED); 1473 if (!gsr->gsb) { 1474 rc = H_P3; 1475 goto out1; 1476 } 1477 1478 if (len != gsr->len) { 1479 rc = H_P3; 1480 goto out1; 1481 } 1482 1483 rc = getset_state(guest, vcpuid, gsr); 1484 1485 out1: 1486 address_space_unmap(CPU(cpu)->as, gsr->gsb, len, is_write, len); 1487 return rc; 1488 } 1489 1490 static target_ulong h_guest_getset_state(PowerPCCPU *cpu, 1491 SpaprMachineState *spapr, 1492 target_ulong *args, 1493 bool set) 1494 { 1495 target_ulong flags = args[0]; 1496 target_ulong lpid = args[1]; 1497 target_ulong vcpuid = args[2]; 1498 target_ulong buf = args[3]; 1499 target_ulong buflen = args[4]; 1500 struct guest_state_request gsr; 1501 SpaprMachineStateNestedGuest *guest; 1502 1503 guest = spapr_get_nested_guest(spapr, lpid); 1504 if (!guest) { 1505 return H_P2; 1506 } 1507 gsr.buf = buf; 1508 assert(buflen <= GSB_MAX_BUF_SIZE); 1509 gsr.len = buflen; 1510 gsr.flags = 0; 1511 if (flags & H_GUEST_GETSET_STATE_FLAG_GUEST_WIDE) { 1512 gsr.flags |= GUEST_STATE_REQUEST_GUEST_WIDE; 1513 } 1514 if (flags & !H_GUEST_GETSET_STATE_FLAG_GUEST_WIDE) { 1515 return H_PARAMETER; /* flag not supported yet */ 1516 } 1517 1518 if (set) { 1519 gsr.flags |= GUEST_STATE_REQUEST_SET; 1520 } 1521 return map_and_getset_state(cpu, guest, vcpuid, &gsr); 1522 } 1523 1524 static target_ulong h_guest_set_state(PowerPCCPU *cpu, 1525 SpaprMachineState *spapr, 1526 target_ulong opcode, 1527 target_ulong *args) 1528 { 1529 return h_guest_getset_state(cpu, spapr, args, true); 1530 } 1531 1532 static target_ulong h_guest_get_state(PowerPCCPU *cpu, 1533 SpaprMachineState *spapr, 1534 target_ulong opcode, 1535 target_ulong *args) 1536 { 1537 return h_guest_getset_state(cpu, spapr, args, false); 1538 } 1539 1540 static void exit_nested_store_l2(PowerPCCPU *cpu, int excp, 1541 SpaprMachineStateNestedGuestVcpu *vcpu) 1542 { 1543 CPUPPCState *env = &cpu->env; 1544 SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); 1545 target_ulong now, hdar, hdsisr, asdr; 1546 1547 assert(sizeof(env->gpr) == sizeof(vcpu->state.gpr)); /* sanity check */ 1548 1549 now = cpu_ppc_load_tbl(env); /* L2 timebase */ 1550 now -= vcpu->tb_offset; /* L1 timebase */ 1551 vcpu->state.dec_expiry_tb = now - cpu_ppc_load_decr(env); 1552 cpu_ppc_store_decr(env, spapr_cpu->nested_host_state->dec_expiry_tb - now); 1553 /* backup hdar, hdsisr, asdr if reqd later below */ 1554 hdar = vcpu->state.hdar; 1555 hdsisr = vcpu->state.hdsisr; 1556 asdr = vcpu->state.asdr; 1557 1558 nested_save_state(&vcpu->state, cpu); 1559 1560 if (excp == POWERPC_EXCP_MCHECK || 1561 excp == POWERPC_EXCP_RESET || 1562 excp == POWERPC_EXCP_SYSCALL) { 1563 vcpu->state.nip = env->spr[SPR_SRR0]; 1564 vcpu->state.msr = env->spr[SPR_SRR1] & env->msr_mask; 1565 } else { 1566 vcpu->state.nip = env->spr[SPR_HSRR0]; 1567 vcpu->state.msr = env->spr[SPR_HSRR1] & env->msr_mask; 1568 } 1569 1570 /* hdar, hdsisr, asdr should be retained unless certain exceptions */ 1571 if ((excp != POWERPC_EXCP_HDSI) && (excp != POWERPC_EXCP_HISI)) { 1572 vcpu->state.asdr = asdr; 1573 } else if (excp != POWERPC_EXCP_HDSI) { 1574 vcpu->state.hdar = hdar; 1575 vcpu->state.hdsisr = hdsisr; 1576 } 1577 } 1578 1579 static int get_exit_ids(uint64_t srr0, uint16_t ids[16]) 1580 { 1581 int nr; 1582 1583 switch (srr0) { 1584 case 0xc00: 1585 nr = 10; 1586 ids[0] = GSB_VCPU_GPR3; 1587 ids[1] = GSB_VCPU_GPR4; 1588 ids[2] = GSB_VCPU_GPR5; 1589 ids[3] = GSB_VCPU_GPR6; 1590 ids[4] = GSB_VCPU_GPR7; 1591 ids[5] = GSB_VCPU_GPR8; 1592 ids[6] = GSB_VCPU_GPR9; 1593 ids[7] = GSB_VCPU_GPR10; 1594 ids[8] = GSB_VCPU_GPR11; 1595 ids[9] = GSB_VCPU_GPR12; 1596 break; 1597 case 0xe00: 1598 nr = 5; 1599 ids[0] = GSB_VCPU_SPR_HDAR; 1600 ids[1] = GSB_VCPU_SPR_HDSISR; 1601 ids[2] = GSB_VCPU_SPR_ASDR; 1602 ids[3] = GSB_VCPU_SPR_NIA; 1603 ids[4] = GSB_VCPU_SPR_MSR; 1604 break; 1605 case 0xe20: 1606 nr = 4; 1607 ids[0] = GSB_VCPU_SPR_HDAR; 1608 ids[1] = GSB_VCPU_SPR_ASDR; 1609 ids[2] = GSB_VCPU_SPR_NIA; 1610 ids[3] = GSB_VCPU_SPR_MSR; 1611 break; 1612 case 0xe40: 1613 nr = 3; 1614 ids[0] = GSB_VCPU_SPR_HEIR; 1615 ids[1] = GSB_VCPU_SPR_NIA; 1616 ids[2] = GSB_VCPU_SPR_MSR; 1617 break; 1618 case 0xf80: 1619 nr = 3; 1620 ids[0] = GSB_VCPU_SPR_HFSCR; 1621 ids[1] = GSB_VCPU_SPR_NIA; 1622 ids[2] = GSB_VCPU_SPR_MSR; 1623 break; 1624 default: 1625 nr = 0; 1626 break; 1627 } 1628 1629 return nr; 1630 } 1631 1632 static void exit_process_output_buffer(PowerPCCPU *cpu, 1633 SpaprMachineStateNestedGuest *guest, 1634 target_ulong vcpuid, 1635 target_ulong *r3) 1636 { 1637 SpaprMachineStateNestedGuestVcpu *vcpu = &guest->vcpus[vcpuid]; 1638 struct guest_state_request gsr; 1639 struct guest_state_buffer *gsb; 1640 struct guest_state_element *element; 1641 struct guest_state_element_type *type; 1642 int exit_id_count = 0; 1643 uint16_t exit_cause_ids[16]; 1644 hwaddr len; 1645 1646 len = vcpu->runbufout.size; 1647 gsb = address_space_map(CPU(cpu)->as, vcpu->runbufout.addr, &len, true, 1648 MEMTXATTRS_UNSPECIFIED); 1649 if (!gsb || len != vcpu->runbufout.size) { 1650 address_space_unmap(CPU(cpu)->as, gsb, len, true, len); 1651 *r3 = H_P2; 1652 return; 1653 } 1654 1655 exit_id_count = get_exit_ids(*r3, exit_cause_ids); 1656 1657 /* Create a buffer of elements to send back */ 1658 gsb->num_elements = cpu_to_be32(exit_id_count); 1659 element = gsb->elements; 1660 for (int i = 0; i < exit_id_count; i++) { 1661 type = guest_state_element_type_find(exit_cause_ids[i]); 1662 assert(type); 1663 element->id = cpu_to_be16(exit_cause_ids[i]); 1664 element->size = cpu_to_be16(type->size); 1665 element = guest_state_element_next(element, NULL, NULL); 1666 } 1667 gsr.gsb = gsb; 1668 gsr.len = VCPU_OUT_BUF_MIN_SZ; 1669 gsr.flags = 0; /* get + never guest wide */ 1670 getset_state(guest, vcpuid, &gsr); 1671 1672 address_space_unmap(CPU(cpu)->as, gsb, len, true, len); 1673 return; 1674 } 1675 1676 static 1677 void spapr_exit_nested_papr(SpaprMachineState *spapr, PowerPCCPU *cpu, int excp) 1678 { 1679 CPUPPCState *env = &cpu->env; 1680 CPUState *cs = CPU(cpu); 1681 SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); 1682 target_ulong r3_return = env->excp_vectors[excp]; /* hcall return value */ 1683 target_ulong lpid = 0, vcpuid = 0; 1684 struct SpaprMachineStateNestedGuestVcpu *vcpu = NULL; 1685 struct SpaprMachineStateNestedGuest *guest = NULL; 1686 1687 lpid = spapr_cpu->nested_host_state->gpr[5]; 1688 vcpuid = spapr_cpu->nested_host_state->gpr[6]; 1689 guest = spapr_get_nested_guest(spapr, lpid); 1690 assert(guest); 1691 spapr_nested_vcpu_check(guest, vcpuid, false); 1692 vcpu = &guest->vcpus[vcpuid]; 1693 1694 exit_nested_store_l2(cpu, excp, vcpu); 1695 /* do the output buffer for run_vcpu*/ 1696 exit_process_output_buffer(cpu, guest, vcpuid, &r3_return); 1697 1698 assert(env->spr[SPR_LPIDR] != 0); 1699 nested_load_state(cpu, spapr_cpu->nested_host_state); 1700 cpu_ppc_decrease_tb_by_offset(env, vcpu->tb_offset); 1701 env->gpr[3] = H_SUCCESS; 1702 env->gpr[4] = r3_return; 1703 nested_post_load_state(env, cs); 1704 cpu_ppc_hdecr_exit(env); 1705 1706 spapr_cpu->in_nested = false; 1707 g_free(spapr_cpu->nested_host_state); 1708 spapr_cpu->nested_host_state = NULL; 1709 } 1710 1711 void spapr_exit_nested(PowerPCCPU *cpu, int excp) 1712 { 1713 SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); 1714 SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); 1715 1716 assert(spapr_cpu->in_nested); 1717 if (spapr_nested_api(spapr) == NESTED_API_KVM_HV) { 1718 spapr_exit_nested_hv(cpu, excp); 1719 } else if (spapr_nested_api(spapr) == NESTED_API_PAPR) { 1720 spapr_exit_nested_papr(spapr, cpu, excp); 1721 } else { 1722 g_assert_not_reached(); 1723 } 1724 } 1725 1726 static void nested_papr_load_l2(PowerPCCPU *cpu, 1727 CPUPPCState *env, 1728 SpaprMachineStateNestedGuestVcpu *vcpu, 1729 target_ulong now) 1730 { 1731 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); 1732 target_ulong lpcr, lpcr_mask, hdec; 1733 lpcr_mask = LPCR_DPFD | LPCR_ILE | LPCR_AIL | LPCR_LD | LPCR_MER; 1734 1735 assert(vcpu); 1736 assert(sizeof(env->gpr) == sizeof(vcpu->state.gpr)); 1737 nested_load_state(cpu, &vcpu->state); 1738 lpcr = (env->spr[SPR_LPCR] & ~lpcr_mask) | 1739 (vcpu->state.lpcr & lpcr_mask); 1740 lpcr |= LPCR_HR | LPCR_UPRT | LPCR_GTSE | LPCR_HVICE | LPCR_HDICE; 1741 lpcr &= ~LPCR_LPES0; 1742 env->spr[SPR_LPCR] = lpcr & pcc->lpcr_mask; 1743 1744 hdec = vcpu->hdecr_expiry_tb - now; 1745 cpu_ppc_store_decr(env, vcpu->state.dec_expiry_tb - now); 1746 cpu_ppc_hdecr_init(env); 1747 cpu_ppc_store_hdecr(env, hdec); 1748 1749 cpu_ppc_increase_tb_by_offset(env, vcpu->tb_offset); 1750 } 1751 1752 static void nested_papr_run_vcpu(PowerPCCPU *cpu, 1753 uint64_t lpid, 1754 SpaprMachineStateNestedGuestVcpu *vcpu) 1755 { 1756 SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); 1757 CPUPPCState *env = &cpu->env; 1758 CPUState *cs = CPU(cpu); 1759 SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); 1760 target_ulong now = cpu_ppc_load_tbl(env); 1761 1762 assert(env->spr[SPR_LPIDR] == 0); 1763 assert(spapr->nested.api); /* ensure API version is initialized */ 1764 spapr_cpu->nested_host_state = g_try_new(struct nested_ppc_state, 1); 1765 assert(spapr_cpu->nested_host_state); 1766 nested_save_state(spapr_cpu->nested_host_state, cpu); 1767 spapr_cpu->nested_host_state->dec_expiry_tb = now - cpu_ppc_load_decr(env); 1768 nested_papr_load_l2(cpu, env, vcpu, now); 1769 env->spr[SPR_LPIDR] = lpid; /* post load l2 */ 1770 1771 spapr_cpu->in_nested = true; 1772 nested_post_load_state(env, cs); 1773 } 1774 1775 static target_ulong h_guest_run_vcpu(PowerPCCPU *cpu, 1776 SpaprMachineState *spapr, 1777 target_ulong opcode, 1778 target_ulong *args) 1779 { 1780 CPUPPCState *env = &cpu->env; 1781 target_ulong flags = args[0]; 1782 target_ulong lpid = args[1]; 1783 target_ulong vcpuid = args[2]; 1784 struct SpaprMachineStateNestedGuestVcpu *vcpu; 1785 struct guest_state_request gsr; 1786 SpaprMachineStateNestedGuest *guest; 1787 target_ulong rc; 1788 1789 if (flags) /* don't handle any flags for now */ 1790 return H_PARAMETER; 1791 1792 guest = spapr_get_nested_guest(spapr, lpid); 1793 if (!guest) { 1794 return H_P2; 1795 } 1796 if (!spapr_nested_vcpu_check(guest, vcpuid, true)) { 1797 return H_P3; 1798 } 1799 1800 if (guest->parttbl[0] == 0) { 1801 /* At least need a partition scoped radix tree */ 1802 return H_NOT_AVAILABLE; 1803 } 1804 1805 vcpu = &guest->vcpus[vcpuid]; 1806 1807 /* Read run_vcpu input buffer to update state */ 1808 gsr.buf = vcpu->runbufin.addr; 1809 gsr.len = vcpu->runbufin.size; 1810 gsr.flags = GUEST_STATE_REQUEST_SET; /* Thread wide + writing */ 1811 rc = map_and_getset_state(cpu, guest, vcpuid, &gsr); 1812 if (rc == H_SUCCESS) { 1813 nested_papr_run_vcpu(cpu, lpid, vcpu); 1814 } else { 1815 env->gpr[3] = rc; 1816 } 1817 return env->gpr[3]; 1818 } 1819 1820 void spapr_register_nested_hv(void) 1821 { 1822 spapr_register_hypercall(KVMPPC_H_SET_PARTITION_TABLE, h_set_ptbl); 1823 spapr_register_hypercall(KVMPPC_H_ENTER_NESTED, h_enter_nested); 1824 spapr_register_hypercall(KVMPPC_H_TLB_INVALIDATE, h_tlb_invalidate); 1825 spapr_register_hypercall(KVMPPC_H_COPY_TOFROM_GUEST, h_copy_tofrom_guest); 1826 } 1827 1828 void spapr_unregister_nested_hv(void) 1829 { 1830 spapr_unregister_hypercall(KVMPPC_H_SET_PARTITION_TABLE); 1831 spapr_unregister_hypercall(KVMPPC_H_ENTER_NESTED); 1832 spapr_unregister_hypercall(KVMPPC_H_TLB_INVALIDATE); 1833 spapr_unregister_hypercall(KVMPPC_H_COPY_TOFROM_GUEST); 1834 } 1835 1836 void spapr_register_nested_papr(void) 1837 { 1838 spapr_register_hypercall(H_GUEST_GET_CAPABILITIES, 1839 h_guest_get_capabilities); 1840 spapr_register_hypercall(H_GUEST_SET_CAPABILITIES, 1841 h_guest_set_capabilities); 1842 spapr_register_hypercall(H_GUEST_CREATE, h_guest_create); 1843 spapr_register_hypercall(H_GUEST_DELETE, h_guest_delete); 1844 spapr_register_hypercall(H_GUEST_CREATE_VCPU, h_guest_create_vcpu); 1845 spapr_register_hypercall(H_GUEST_SET_STATE, h_guest_set_state); 1846 spapr_register_hypercall(H_GUEST_GET_STATE, h_guest_get_state); 1847 spapr_register_hypercall(H_GUEST_RUN_VCPU, h_guest_run_vcpu); 1848 } 1849 1850 void spapr_unregister_nested_papr(void) 1851 { 1852 spapr_unregister_hypercall(H_GUEST_GET_CAPABILITIES); 1853 spapr_unregister_hypercall(H_GUEST_SET_CAPABILITIES); 1854 spapr_unregister_hypercall(H_GUEST_CREATE); 1855 spapr_unregister_hypercall(H_GUEST_DELETE); 1856 spapr_unregister_hypercall(H_GUEST_CREATE_VCPU); 1857 spapr_unregister_hypercall(H_GUEST_SET_STATE); 1858 spapr_unregister_hypercall(H_GUEST_GET_STATE); 1859 spapr_unregister_hypercall(H_GUEST_RUN_VCPU); 1860 } 1861 1862 #else 1863 void spapr_exit_nested(PowerPCCPU *cpu, int excp) 1864 { 1865 g_assert_not_reached(); 1866 } 1867 1868 void spapr_register_nested_hv(void) 1869 { 1870 /* DO NOTHING */ 1871 } 1872 1873 void spapr_unregister_nested_hv(void) 1874 { 1875 /* DO NOTHING */ 1876 } 1877 1878 bool spapr_get_pate_nested_hv(SpaprMachineState *spapr, PowerPCCPU *cpu, 1879 target_ulong lpid, ppc_v3_pate_t *entry) 1880 { 1881 return false; 1882 } 1883 1884 bool spapr_get_pate_nested_papr(SpaprMachineState *spapr, PowerPCCPU *cpu, 1885 target_ulong lpid, ppc_v3_pate_t *entry) 1886 { 1887 return false; 1888 } 1889 1890 void spapr_register_nested_papr(void) 1891 { 1892 /* DO NOTHING */ 1893 } 1894 1895 void spapr_unregister_nested_papr(void) 1896 { 1897 /* DO NOTHING */ 1898 } 1899 1900 void spapr_nested_gsb_init(void) 1901 { 1902 /* DO NOTHING */ 1903 } 1904 1905 #endif 1906