#ifndef HW_SPAPR_NESTED_H
#define HW_SPAPR_NESTED_H

#include "target/ppc/cpu.h"

/* Guest State Buffer Element IDs */
#define GSB_HV_VCPU_IGNORED_ID 0x0000 /* An element whose value is ignored */
#define GSB_HV_VCPU_STATE_SIZE 0x0001 /* HV internal format VCPU state size */
#define GSB_VCPU_OUT_BUF_MIN_SZ 0x0002 /* Min size of the Run VCPU o/p buffer */
#define GSB_VCPU_LPVR 0x0003 /* Logical PVR */
#define GSB_TB_OFFSET 0x0004 /* Timebase Offset */
#define GSB_PART_SCOPED_PAGETBL 0x0005 /* Partition Scoped Page Table */
#define GSB_PROCESS_TBL 0x0006 /* Process Table */
                /* RESERVED 0x0007 - 0x0BFF */
#define GSB_VCPU_IN_BUFFER 0x0C00 /* Run VCPU Input Buffer */
#define GSB_VCPU_OUT_BUFFER 0x0C01 /* Run VCPU Out Buffer */
#define GSB_VCPU_VPA 0x0C02 /* HRA to Guest VCPU VPA */
                /* RESERVED 0x0C03 - 0x0FFF */
#define GSB_VCPU_GPR0 0x1000
#define GSB_VCPU_GPR1 0x1001
#define GSB_VCPU_GPR2 0x1002
#define GSB_VCPU_GPR3 0x1003
#define GSB_VCPU_GPR4 0x1004
#define GSB_VCPU_GPR5 0x1005
#define GSB_VCPU_GPR6 0x1006
#define GSB_VCPU_GPR7 0x1007
#define GSB_VCPU_GPR8 0x1008
#define GSB_VCPU_GPR9 0x1009
#define GSB_VCPU_GPR10 0x100A
#define GSB_VCPU_GPR11 0x100B
#define GSB_VCPU_GPR12 0x100C
#define GSB_VCPU_GPR13 0x100D
#define GSB_VCPU_GPR14 0x100E
#define GSB_VCPU_GPR15 0x100F
#define GSB_VCPU_GPR16 0x1010
#define GSB_VCPU_GPR17 0x1011
#define GSB_VCPU_GPR18 0x1012
#define GSB_VCPU_GPR19 0x1013
#define GSB_VCPU_GPR20 0x1014
#define GSB_VCPU_GPR21 0x1015
#define GSB_VCPU_GPR22 0x1016
#define GSB_VCPU_GPR23 0x1017
#define GSB_VCPU_GPR24 0x1018
#define GSB_VCPU_GPR25 0x1019
#define GSB_VCPU_GPR26 0x101A
#define GSB_VCPU_GPR27 0x101B
#define GSB_VCPU_GPR28 0x101C
#define GSB_VCPU_GPR29 0x101D
#define GSB_VCPU_GPR30 0x101E
#define GSB_VCPU_GPR31 0x101F
#define GSB_VCPU_HDEC_EXPIRY_TB 0x1020
#define GSB_VCPU_SPR_NIA 0x1021
#define GSB_VCPU_SPR_MSR 0x1022
#define GSB_VCPU_SPR_LR 0x1023
#define GSB_VCPU_SPR_XER 0x1024
#define GSB_VCPU_SPR_CTR 0x1025
#define GSB_VCPU_SPR_CFAR 0x1026
#define GSB_VCPU_SPR_SRR0 0x1027
#define GSB_VCPU_SPR_SRR1 0x1028
#define GSB_VCPU_SPR_DAR 0x1029
#define GSB_VCPU_DEC_EXPIRE_TB 0x102A
#define GSB_VCPU_SPR_VTB 0x102B
#define GSB_VCPU_SPR_LPCR 0x102C
#define GSB_VCPU_SPR_HFSCR 0x102D
#define GSB_VCPU_SPR_FSCR 0x102E
#define GSB_VCPU_SPR_FPSCR 0x102F
#define GSB_VCPU_SPR_DAWR0 0x1030
#define GSB_VCPU_SPR_DAWR1 0x1031
#define GSB_VCPU_SPR_CIABR 0x1032
#define GSB_VCPU_SPR_PURR 0x1033
#define GSB_VCPU_SPR_SPURR 0x1034
#define GSB_VCPU_SPR_IC 0x1035
#define GSB_VCPU_SPR_SPRG0 0x1036
#define GSB_VCPU_SPR_SPRG1 0x1037
#define GSB_VCPU_SPR_SPRG2 0x1038
#define GSB_VCPU_SPR_SPRG3 0x1039
#define GSB_VCPU_SPR_PPR 0x103A
#define GSB_VCPU_SPR_MMCR0 0x103B
#define GSB_VCPU_SPR_MMCR1 0x103C
#define GSB_VCPU_SPR_MMCR2 0x103D
#define GSB_VCPU_SPR_MMCR3 0x103E
#define GSB_VCPU_SPR_MMCRA 0x103F
#define GSB_VCPU_SPR_SIER 0x1040
#define GSB_VCPU_SPR_SIER2 0x1041
#define GSB_VCPU_SPR_SIER3 0x1042
#define GSB_VCPU_SPR_BESCR 0x1043
#define GSB_VCPU_SPR_EBBHR 0x1044
#define GSB_VCPU_SPR_EBBRR 0x1045
#define GSB_VCPU_SPR_AMR 0x1046
#define GSB_VCPU_SPR_IAMR 0x1047
#define GSB_VCPU_SPR_AMOR 0x1048
#define GSB_VCPU_SPR_UAMOR 0x1049
#define GSB_VCPU_SPR_SDAR 0x104A
#define GSB_VCPU_SPR_SIAR 0x104B
#define GSB_VCPU_SPR_DSCR 0x104C
#define GSB_VCPU_SPR_TAR 0x104D
#define GSB_VCPU_SPR_DEXCR 0x104E
#define GSB_VCPU_SPR_HDEXCR 0x104F
#define GSB_VCPU_SPR_HASHKEYR 0x1050
#define GSB_VCPU_SPR_HASHPKEYR 0x1051
#define GSB_VCPU_SPR_CTRL 0x1052
                /* RESERVED 0x1053 - 0x1FFF */
#define GSB_VCPU_SPR_CR 0x2000
#define GSB_VCPU_SPR_PIDR 0x2001
#define GSB_VCPU_SPR_DSISR 0x2002
#define GSB_VCPU_SPR_VSCR 0x2003
#define GSB_VCPU_SPR_VRSAVE 0x2004
#define GSB_VCPU_SPR_DAWRX0 0x2005
#define GSB_VCPU_SPR_DAWRX1 0x2006
#define GSB_VCPU_SPR_PMC1 0x2007
#define GSB_VCPU_SPR_PMC2 0x2008
#define GSB_VCPU_SPR_PMC3 0x2009
#define GSB_VCPU_SPR_PMC4 0x200A
#define GSB_VCPU_SPR_PMC5 0x200B
#define GSB_VCPU_SPR_PMC6 0x200C
#define GSB_VCPU_SPR_WORT 0x200D
#define GSB_VCPU_SPR_PSPB 0x200E
                /* RESERVED 0x200F - 0x2FFF */
#define GSB_VCPU_SPR_VSR0 0x3000
#define GSB_VCPU_SPR_VSR1 0x3001
#define GSB_VCPU_SPR_VSR2 0x3002
#define GSB_VCPU_SPR_VSR3 0x3003
#define GSB_VCPU_SPR_VSR4 0x3004
#define GSB_VCPU_SPR_VSR5 0x3005
#define GSB_VCPU_SPR_VSR6 0x3006
#define GSB_VCPU_SPR_VSR7 0x3007
#define GSB_VCPU_SPR_VSR8 0x3008
#define GSB_VCPU_SPR_VSR9 0x3009
#define GSB_VCPU_SPR_VSR10 0x300A
#define GSB_VCPU_SPR_VSR11 0x300B
#define GSB_VCPU_SPR_VSR12 0x300C
#define GSB_VCPU_SPR_VSR13 0x300D
#define GSB_VCPU_SPR_VSR14 0x300E
#define GSB_VCPU_SPR_VSR15 0x300F
#define GSB_VCPU_SPR_VSR16 0x3010
#define GSB_VCPU_SPR_VSR17 0x3011
#define GSB_VCPU_SPR_VSR18 0x3012
#define GSB_VCPU_SPR_VSR19 0x3013
#define GSB_VCPU_SPR_VSR20 0x3014
#define GSB_VCPU_SPR_VSR21 0x3015
#define GSB_VCPU_SPR_VSR22 0x3016
#define GSB_VCPU_SPR_VSR23 0x3017
#define GSB_VCPU_SPR_VSR24 0x3018
#define GSB_VCPU_SPR_VSR25 0x3019
#define GSB_VCPU_SPR_VSR26 0x301A
#define GSB_VCPU_SPR_VSR27 0x301B
#define GSB_VCPU_SPR_VSR28 0x301C
#define GSB_VCPU_SPR_VSR29 0x301D
#define GSB_VCPU_SPR_VSR30 0x301E
#define GSB_VCPU_SPR_VSR31 0x301F
#define GSB_VCPU_SPR_VSR32 0x3020
#define GSB_VCPU_SPR_VSR33 0x3021
#define GSB_VCPU_SPR_VSR34 0x3022
#define GSB_VCPU_SPR_VSR35 0x3023
#define GSB_VCPU_SPR_VSR36 0x3024
#define GSB_VCPU_SPR_VSR37 0x3025
#define GSB_VCPU_SPR_VSR38 0x3026
#define GSB_VCPU_SPR_VSR39 0x3027
#define GSB_VCPU_SPR_VSR40 0x3028
#define GSB_VCPU_SPR_VSR41 0x3029
#define GSB_VCPU_SPR_VSR42 0x302A
#define GSB_VCPU_SPR_VSR43 0x302B
#define GSB_VCPU_SPR_VSR44 0x302C
#define GSB_VCPU_SPR_VSR45 0x302D
#define GSB_VCPU_SPR_VSR46 0x302E
#define GSB_VCPU_SPR_VSR47 0x302F
#define GSB_VCPU_SPR_VSR48 0x3030
#define GSB_VCPU_SPR_VSR49 0x3031
#define GSB_VCPU_SPR_VSR50 0x3032
#define GSB_VCPU_SPR_VSR51 0x3033
#define GSB_VCPU_SPR_VSR52 0x3034
#define GSB_VCPU_SPR_VSR53 0x3035
#define GSB_VCPU_SPR_VSR54 0x3036
#define GSB_VCPU_SPR_VSR55 0x3037
#define GSB_VCPU_SPR_VSR56 0x3038
#define GSB_VCPU_SPR_VSR57 0x3039
#define GSB_VCPU_SPR_VSR58 0x303A
#define GSB_VCPU_SPR_VSR59 0x303B
#define GSB_VCPU_SPR_VSR60 0x303C
#define GSB_VCPU_SPR_VSR61 0x303D
#define GSB_VCPU_SPR_VSR62 0x303E
#define GSB_VCPU_SPR_VSR63 0x303F
                /* RESERVED 0x3040 - 0xEFFF */
#define GSB_VCPU_SPR_HDAR 0xF000
#define GSB_VCPU_SPR_HDSISR 0xF001
#define GSB_VCPU_SPR_HEIR 0xF002
#define GSB_VCPU_SPR_ASDR 0xF003
/* End of list of Guest State Buffer Element IDs */
#define GSB_LAST GSB_VCPU_SPR_ASDR
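
/*
 * Illustrative sketch only (not part of the original header): the assumed
 * layout of a Guest State Buffer passed by the L1 hypervisor to the nested
 * PAPR h-calls (e.g. H_GUEST_SET_STATE / H_GUEST_GET_STATE). The actual
 * (de)serialisation is done outside this header, in spapr_nested.c:
 *
 *   +0x0 : u32 count          number of elements in the buffer
 *   +0x4 : elements, each encoded as
 *            u16 id           one of the GSB_* IDs above
 *            u16 size         size of the value in bytes
 *            u8  value[size]  the element value
 */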

typedef struct SpaprMachineStateNested {
    uint64_t ptcr; /* Partition Table Control Register value */
    uint8_t api;
#define NESTED_API_KVM_HV 1
#define NESTED_API_PAPR 2
    bool capabilities_set;
    uint32_t pvr_base;
    GHashTable *guests;
} SpaprMachineStateNested;

typedef struct SpaprMachineStateNestedGuest {
    uint32_t pvr_logical;
    unsigned long nr_vcpus;
    uint64_t parttbl[2];
    uint64_t tb_offset;
    struct SpaprMachineStateNestedGuestVcpu *vcpus;
} SpaprMachineStateNestedGuest;

/* Nested PAPR API related macros */
#define H_GUEST_CAPABILITIES_COPY_MEM 0x8000000000000000
#define H_GUEST_CAPABILITIES_P9_MODE 0x4000000000000000
#define H_GUEST_CAPABILITIES_P10_MODE 0x2000000000000000
#define H_GUEST_CAP_VALID_MASK (H_GUEST_CAPABILITIES_P10_MODE | \
                                H_GUEST_CAPABILITIES_P9_MODE)
#define H_GUEST_CAP_COPY_MEM_BMAP 0
#define H_GUEST_CAP_P9_MODE_BMAP 1
#define H_GUEST_CAP_P10_MODE_BMAP 2
#define PAPR_NESTED_GUEST_MAX 4096
#define H_GUEST_DELETE_ALL_FLAG 0x8000000000000000ULL
#define PAPR_NESTED_GUEST_VCPU_MAX 2048
#define VCPU_OUT_BUF_MIN_SZ 0x80ULL
#define HVMASK_DEFAULT 0xffffffffffffffff
#define HVMASK_LPCR 0x0070000003820800
#define HVMASK_MSR 0xEBFFFFFFFFBFEFFF
#define HVMASK_HDEXCR 0x00000000FFFFFFFF
#define HVMASK_TB_OFFSET 0x000000FFFFFFFFFF

/*
 * As per ISA v3.1B, the following PCR bits are reserved:
 *      0:2
 *      4:57  (the ISA mentions bit 58 as well, but it should be used for P10)
 *      61:63
 * Hence the PCR bits for v2.06 and v2.05 compatibility are not included in
 * PCR_LOW_BITS.
 */
#define PCR_LOW_BITS (PCR_COMPAT_3_10 | PCR_COMPAT_3_00)
#define HVMASK_PCR (~PCR_LOW_BITS)

#define GUEST_STATE_ELEMENT(i, sz, s, f, ptr, c) { \
    .id = (i), \
    .size = (sz), \
    .location = ptr, \
    .offset = offsetof(struct s, f), \
    .copy = (c) \
}

#define GSBE_NESTED(i, sz, f, c) { \
    .id = (i), \
    .size = (sz), \
    .location = get_guest_ptr, \
    .offset = offsetof(struct SpaprMachineStateNestedGuest, f), \
    .copy = (c), \
    .mask = HVMASK_DEFAULT \
}

#define GSBE_NESTED_MSK(i, sz, f, c, m) { \
    .id = (i), \
    .size = (sz), \
    .location = get_guest_ptr, \
    .offset = offsetof(struct SpaprMachineStateNestedGuest, f), \
    .copy = (c), \
    .mask = (m) \
}

#define GSBE_NESTED_VCPU(i, sz, f, c) { \
    .id = (i), \
    .size = (sz), \
    .location = get_vcpu_ptr, \
    .offset = offsetof(struct SpaprMachineStateNestedGuestVcpu, f), \
    .copy = (c), \
    .mask = HVMASK_DEFAULT \
}

#define GUEST_STATE_ELEMENT_NOP(i, sz) { \
    .id = (i), \
    .size = (sz), \
    .location = NULL, \
    .offset = 0, \
    .copy = NULL, \
    .mask = HVMASK_DEFAULT \
}

#define GUEST_STATE_ELEMENT_NOP_DW(i) \
    GUEST_STATE_ELEMENT_NOP(i, 8)
#define GUEST_STATE_ELEMENT_NOP_W(i) \
    GUEST_STATE_ELEMENT_NOP(i, 4)

#define GUEST_STATE_ELEMENT_BASE(i, s, c) { \
    .id = (i), \
    .size = (s), \
    .location = get_vcpu_state_ptr, \
    .offset = 0, \
    .copy = (c), \
    .mask = HVMASK_DEFAULT \
}

#define GUEST_STATE_ELEMENT_OFF(i, s, f, c) { \
    .id = (i), \
    .size = (s), \
    .location = get_vcpu_state_ptr, \
    .offset = offsetof(struct nested_ppc_state, f), \
    .copy = (c), \
    .mask = HVMASK_DEFAULT \
}

#define GUEST_STATE_ELEMENT_MSK(i, s, f, c, m) { \
    .id = (i), \
    .size = (s), \
    .location = get_vcpu_state_ptr, \
    .offset = offsetof(struct nested_ppc_state, f), \
    .copy = (c), \
    .mask = (m) \
}

#define GUEST_STATE_ELEMENT_ENV_QW(i, f) \
    GUEST_STATE_ELEMENT_OFF(i, 16, f, copy_state_16to16)
#define GUEST_STATE_ELEMENT_ENV_DW(i, f) \
    GUEST_STATE_ELEMENT_OFF(i, 8, f, copy_state_8to8)
#define GUEST_STATE_ELEMENT_ENV_W(i, f) \
    GUEST_STATE_ELEMENT_OFF(i, 4, f, copy_state_4to8)
#define GUEST_STATE_ELEMENT_ENV_WW(i, f) \
    GUEST_STATE_ELEMENT_OFF(i, 4, f, copy_state_4to4)
#define GSE_ENV_DWM(i, f, m) \
    GUEST_STATE_ELEMENT_MSK(i, 8, f, copy_state_8to8, m)
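
/*
 * Illustrative sketch only (not part of the original header): spapr_nested.c
 * is expected to build its table of struct guest_state_element_type entries
 * (see the end of this file) from the GSB_* IDs using the initialiser macros
 * above, along the lines of:
 *
 *     GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_NIA, nip),
 *     GUEST_STATE_ELEMENT_ENV_WW(GSB_VCPU_SPR_CR, cr),
 *     GSE_ENV_DWM(GSB_VCPU_SPR_LPCR, lpcr, HVMASK_LPCR),
 *     GSBE_NESTED_MSK(GSB_TB_OFFSET, 0x8, tb_offset,
 *                     copy_state_8to8, HVMASK_TB_OFFSET),
 *
 * where "nip", "cr" and "lpcr" are fields of struct nested_ppc_state,
 * "tb_offset" is a field of SpaprMachineStateNestedGuest (both defined
 * below), and copy_state_8to8 is one of the copy helpers referenced by the
 * GUEST_STATE_ELEMENT_ENV_* macros.
 */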

/*
 * Register state for entering a nested guest with H_ENTER_NESTED.
 * New members must be added at the end.
 */
struct kvmppc_hv_guest_state {
    uint64_t version; /* version of this structure layout, must be first */
    uint32_t lpid;
    uint32_t vcpu_token;
    /* These registers are hypervisor privileged (at least for writing) */
    uint64_t lpcr;
    uint64_t pcr;
    uint64_t amor;
    uint64_t dpdes;
    uint64_t hfscr;
    int64_t tb_offset;
    uint64_t dawr0;
    uint64_t dawrx0;
    uint64_t ciabr;
    uint64_t hdec_expiry;
    uint64_t purr;
    uint64_t spurr;
    uint64_t ic;
    uint64_t vtb;
    uint64_t hdar;
    uint64_t hdsisr;
    uint64_t heir;
    uint64_t asdr;
    /* These are OS privileged but need to be set late in guest entry */
    uint64_t srr0;
    uint64_t srr1;
    uint64_t sprg[4];
    uint64_t pidr;
    uint64_t cfar;
    uint64_t ppr;
    /* Version 1 ends here */
    uint64_t dawr1;
    uint64_t dawrx1;
    /* Version 2 ends here */
};

/* Latest version of hv_guest_state structure */
#define HV_GUEST_STATE_VERSION 2

/* Linux 64-bit powerpc pt_regs struct, used by nested HV */
struct kvmppc_pt_regs {
    uint64_t gpr[32];
    uint64_t nip;
    uint64_t msr;
    uint64_t orig_gpr3; /* Used for restarting system calls */
    uint64_t ctr;
    uint64_t link;
    uint64_t xer;
    uint64_t ccr;
    uint64_t softe;     /* Soft enabled/disabled */
    uint64_t trap;      /* Reason for being here */
    uint64_t dar;       /* Fault registers */
    uint64_t dsisr;     /* on 4xx/Book-E used for ESR */
    uint64_t result;    /* Result of a system call */
};
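
/*
 * Illustrative only: with the original nested KVM-HV API, the L1 hypervisor
 * enters an L2 vCPU roughly as follows (pseudocode; the real flow lives in
 * the L1 kernel and in spapr_nested.c):
 *
 *     struct kvmppc_hv_guest_state hv_regs = {
 *         .version = HV_GUEST_STATE_VERSION,
 *         .lpid = l2_lpid,
 *         .lpcr = ...,
 *     };
 *     struct kvmppc_pt_regs regs = { .nip = l2_entry, .msr = l2_msr, ... };
 *     trap = hcall(H_ENTER_NESTED, gpa_of(&hv_regs), gpa_of(&regs));
 *
 * On exit, both structures are assumed to be updated with the L2 state, and
 * the h-call's return value conveys the interrupt that caused the exit
 * (cf. spapr_exit_nested() declared at the end of this header).
 */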

/*
 * nested_ppc_state is used to save the host CPU state before switching it to
 * the guest CPU state, to be restored on H_ENTER_NESTED exit.
 */
struct nested_ppc_state {
    uint64_t gpr[32];
    uint64_t lr;
    uint64_t ctr;
    uint64_t cfar;
    uint64_t msr;
    uint64_t nip;
    uint32_t cr;

    uint64_t xer;

    uint64_t lpcr;
    uint64_t lpidr;
    uint64_t pidr;
    uint64_t pcr;
    uint64_t dpdes;
    uint64_t hfscr;
    uint64_t srr0;
    uint64_t srr1;
    uint64_t sprg0;
    uint64_t sprg1;
    uint64_t sprg2;
    uint64_t sprg3;
    uint64_t ppr;

    int64_t tb_offset;
    /* Nested PAPR API */
    uint64_t amor;
    uint64_t dawr0;
    uint64_t dawrx0;
    uint64_t ciabr;
    uint64_t purr;
    uint64_t spurr;
    uint64_t ic;
    uint64_t vtb;
    uint64_t hdar;
    uint64_t hdsisr;
    uint64_t heir;
    uint64_t asdr;
    uint64_t dawr1;
    uint64_t dawrx1;
    uint64_t dexcr;
    uint64_t hdexcr;
    uint64_t hashkeyr;
    uint64_t hashpkeyr;
    ppc_vsr_t vsr[64] QEMU_ALIGNED(16);
    uint64_t ebbhr;
    uint64_t tar;
    uint64_t ebbrr;
    uint64_t bescr;
    uint64_t iamr;
    uint64_t amr;
    uint64_t uamor;
    uint64_t dscr;
    uint64_t fscr;
    uint64_t pspb;
    uint64_t ctrl;
    uint64_t vrsave;
    uint64_t dar;
    uint64_t dsisr;
    uint64_t pmc1;
    uint64_t pmc2;
    uint64_t pmc3;
    uint64_t pmc4;
    uint64_t pmc5;
    uint64_t pmc6;
    uint64_t mmcr0;
    uint64_t mmcr1;
    uint64_t mmcr2;
    uint64_t mmcra;
    uint64_t sdar;
    uint64_t siar;
    uint64_t sier;
    uint32_t vscr;
    uint64_t fpscr;
    int64_t dec_expiry_tb;
};

struct SpaprMachineStateNestedGuestVcpuRunBuf {
    uint64_t addr;
    uint64_t size;
};

typedef struct SpaprMachineStateNestedGuestVcpu {
    bool enabled;
    struct nested_ppc_state state;
    struct SpaprMachineStateNestedGuestVcpuRunBuf runbufin;
    struct SpaprMachineStateNestedGuestVcpuRunBuf runbufout;
    int64_t tb_offset;
    uint64_t hdecr_expiry_tb;
} SpaprMachineStateNestedGuestVcpu;

struct guest_state_element_type {
    uint16_t id;
    int size;
#define GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE 0x1
#define GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY 0x2
    uint16_t flags;
    void *(*location)(SpaprMachineStateNestedGuest *, target_ulong);
    size_t offset;
    void (*copy)(void *, void *, bool);
    uint64_t mask;
};

void spapr_exit_nested(PowerPCCPU *cpu, int excp);
typedef struct SpaprMachineState SpaprMachineState;
bool spapr_get_pate_nested_hv(SpaprMachineState *spapr, PowerPCCPU *cpu,
                              target_ulong lpid, ppc_v3_pate_t *entry);
uint8_t spapr_nested_api(SpaprMachineState *spapr);
void spapr_nested_gsb_init(void);
#endif /* HW_SPAPR_NESTED_H */