/*
 * Copyright 2008 IBM Corporation.
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *
 * This work is licensed under the GNU GPL license version 2 or later.
 *
 */

#ifndef KVM_PPC_H
#define KVM_PPC_H

#define TYPE_HOST_POWERPC_CPU POWERPC_CPU_TYPE_NAME("host")

#ifdef CONFIG_KVM

uint32_t kvmppc_get_tbfreq(void);
uint64_t kvmppc_get_clockfreq(void);
bool kvmppc_get_host_model(char **buf);
bool kvmppc_get_host_serial(char **buf);
int kvmppc_get_hasidle(CPUPPCState *env);
int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len);
int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level);
void kvmppc_enable_logical_ci_hcalls(void);
void kvmppc_enable_set_mode_hcall(void);
void kvmppc_enable_clear_ref_mod_hcalls(void);
void kvmppc_enable_h_page_init(void);
void kvmppc_set_papr(PowerPCCPU *cpu);
int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr);
void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy);
bool kvmppc_get_fwnmi(void);
int kvmppc_set_fwnmi(PowerPCCPU *cpu);
int kvmppc_smt_threads(void);
void kvmppc_error_append_smt_possible_hint(Error *const *errp);
int kvmppc_set_smt_threads(int smt);
int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits);
int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits);
int kvmppc_set_tcr(PowerPCCPU *cpu);
int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu);
target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
                                     bool radix, bool gtse,
                                     uint64_t proc_tbl);
void kvmppc_svm_off(Error **errp);
#ifndef CONFIG_USER_ONLY
bool kvmppc_spapr_use_multitce(void);
int kvmppc_spapr_enable_inkernel_multitce(void);
void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
                              uint64_t bus_offset, uint32_t nb_table,
                              int *pfd, bool need_vfio);
int kvmppc_remove_spapr_tce(void *table, int pfd, uint32_t window_size);
int kvmppc_reset_htab(int shift_hint);
uint64_t kvmppc_vrma_limit(unsigned int hash_shift);
bool kvmppc_has_cap_spapr_vfio(void);
#endif /* !CONFIG_USER_ONLY */
bool kvmppc_has_cap_epr(void);
int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function);
int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp);
int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns);
int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
                           uint16_t n_valid, uint16_t n_invalid, Error **errp);
void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n);
void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1);
bool kvmppc_has_cap_fixup_hcalls(void);
bool kvmppc_has_cap_htm(void);
bool kvmppc_has_cap_mmu_radix(void);
bool kvmppc_has_cap_mmu_hash_v3(void);
bool kvmppc_has_cap_xive(void);
int kvmppc_get_cap_safe_cache(void);
int kvmppc_get_cap_safe_bounds_check(void);
int kvmppc_get_cap_safe_indirect_branch(void);
int kvmppc_get_cap_count_cache_flush_assist(void);
bool kvmppc_has_cap_nested_kvm_hv(void);
int kvmppc_set_cap_nested_kvm_hv(int enable);
int kvmppc_get_cap_large_decr(void);
int kvmppc_enable_cap_large_decr(PowerPCCPU *cpu, int enable);
int kvmppc_enable_hwrng(void);
int kvmppc_put_books_sregs(PowerPCCPU *cpu);
PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void);
void kvmppc_check_papr_resize_hpt(Error **errp);
int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu, target_ulong flags, int shift);
int kvmppc_resize_hpt_commit(PowerPCCPU *cpu, target_ulong flags,
                             int shift);
bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu);

bool kvmppc_hpt_needs_host_contiguous_pages(void);
void kvm_check_mmu(PowerPCCPU *cpu, Error **errp);
void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu, unsigned int online);
void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset);

int kvm_handle_nmi(PowerPCCPU *cpu, struct kvm_run *run);
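
/*
 * Note (editorial): when QEMU is built without KVM support, the prototypes
 * above are replaced by the inline stubs below.  Helpers that merely tune
 * optional behaviour degrade to no-ops or benign defaults, while operations
 * that cannot work without KVM report failure.
 */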

#else

static inline uint32_t kvmppc_get_tbfreq(void)
{
    return 0;
}

static inline bool kvmppc_get_host_model(char **buf)
{
    return false;
}

static inline bool kvmppc_get_host_serial(char **buf)
{
    return false;
}

static inline uint64_t kvmppc_get_clockfreq(void)
{
    return 0;
}

static inline uint32_t kvmppc_get_vmx(void)
{
    return 0;
}

static inline uint32_t kvmppc_get_dfp(void)
{
    return 0;
}

static inline int kvmppc_get_hasidle(CPUPPCState *env)
{
    return 0;
}

static inline int kvmppc_get_hypercall(CPUPPCState *env,
                                       uint8_t *buf, int buf_len)
{
    return -1;
}

static inline int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
{
    return -1;
}

static inline void kvmppc_enable_logical_ci_hcalls(void)
{
}

static inline void kvmppc_enable_set_mode_hcall(void)
{
}

static inline void kvmppc_enable_clear_ref_mod_hcalls(void)
{
}

static inline void kvmppc_enable_h_page_init(void)
{
}

static inline void kvmppc_set_papr(PowerPCCPU *cpu)
{
}

static inline int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr)
{
    return 0;
}

static inline void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
{
}

static inline bool kvmppc_get_fwnmi(void)
{
    return false;
}

static inline int kvmppc_set_fwnmi(PowerPCCPU *cpu)
{
    return -1;
}

static inline int kvmppc_smt_threads(void)
{
    return 1;
}

static inline void kvmppc_error_append_smt_possible_hint(Error *const *errp)
{
    return;
}

static inline int kvmppc_set_smt_threads(int smt)
{
    return 0;
}

static inline int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
{
    return 0;
}

static inline int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
{
    return 0;
}

static inline int kvmppc_set_tcr(PowerPCCPU *cpu)
{
    return 0;
}

static inline int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
{
    return -1;
}

static inline target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
                                                   bool radix, bool gtse,
                                                   uint64_t proc_tbl)
{
    return 0;
}

static inline void kvmppc_svm_off(Error **errp)
{
    return;
}

static inline void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu,
                                             unsigned int online)
{
    return;
}

static inline void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset)
{
}

#ifndef CONFIG_USER_ONLY
static inline bool kvmppc_spapr_use_multitce(void)
{
    return false;
}

static inline int kvmppc_spapr_enable_inkernel_multitce(void)
{
    return -1;
}

static inline void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
                                            uint64_t bus_offset,
                                            uint32_t nb_table,
                                            int *pfd, bool need_vfio)
{
    return NULL;
}

static inline int kvmppc_remove_spapr_tce(void *table, int pfd,
                                          uint32_t nb_table)
{
    return -1;
}

static inline int kvmppc_reset_htab(int shift_hint)
{
    return 0;
}
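
/*
 * Note (editorial): the stubs that abort() or g_assert_not_reached() below
 * correspond to facilities that callers are expected to use only after the
 * matching kvm_enabled() / kvmppc_has_cap_*() check has succeeded, so
 * reaching them in a KVM-less build indicates a programming error.
 */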

static inline uint64_t kvmppc_vrma_limit(unsigned int hash_shift)
{
    g_assert_not_reached();
}

static inline bool kvmppc_hpt_needs_host_contiguous_pages(void)
{
    return false;
}

static inline void kvm_check_mmu(PowerPCCPU *cpu, Error **errp)
{
}

static inline bool kvmppc_has_cap_spapr_vfio(void)
{
    return false;
}

static inline void kvmppc_read_hptes(ppc_hash_pte64_t *hptes,
                                     hwaddr ptex, int n)
{
    abort();
}

static inline void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
{
    abort();
}

#endif /* !CONFIG_USER_ONLY */

static inline bool kvmppc_has_cap_epr(void)
{
    return false;
}

static inline int kvmppc_define_rtas_kernel_token(uint32_t token,
                                                  const char *function)
{
    return -1;
}

static inline int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp)
{
    return -1;
}

static inline int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize,
                                   int64_t max_ns)
{
    abort();
}

static inline int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
                                         uint16_t n_valid, uint16_t n_invalid,
                                         Error **errp)
{
    abort();
}

static inline bool kvmppc_has_cap_fixup_hcalls(void)
{
    abort();
}

static inline bool kvmppc_has_cap_htm(void)
{
    return false;
}

static inline bool kvmppc_has_cap_mmu_radix(void)
{
    return false;
}

static inline bool kvmppc_has_cap_mmu_hash_v3(void)
{
    return false;
}

static inline bool kvmppc_has_cap_xive(void)
{
    return false;
}

static inline int kvmppc_get_cap_safe_cache(void)
{
    return 0;
}

static inline int kvmppc_get_cap_safe_bounds_check(void)
{
    return 0;
}

static inline int kvmppc_get_cap_safe_indirect_branch(void)
{
    return 0;
}

static inline int kvmppc_get_cap_count_cache_flush_assist(void)
{
    return 0;
}

static inline bool kvmppc_has_cap_nested_kvm_hv(void)
{
    return false;
}

static inline int kvmppc_set_cap_nested_kvm_hv(int enable)
{
    return -1;
}

static inline int kvmppc_get_cap_large_decr(void)
{
    return 0;
}

static inline int kvmppc_enable_cap_large_decr(PowerPCCPU *cpu, int enable)
{
    return -1;
}

static inline int kvmppc_enable_hwrng(void)
{
    return -1;
}

static inline int kvmppc_put_books_sregs(PowerPCCPU *cpu)
{
    abort();
}

static inline PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
{
    return NULL;
}

static inline void kvmppc_check_papr_resize_hpt(Error **errp)
{
    return;
}

static inline int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu,
                                            target_ulong flags, int shift)
{
    return -ENOSYS;
}

static inline int kvmppc_resize_hpt_commit(PowerPCCPU *cpu,
                                           target_ulong flags, int shift)
{
    return -ENOSYS;
}

static inline bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu)
{
    return false;
}

#endif
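
/*
 * Note (editorial): the helpers below keep guest memory that QEMU patches
 * with executable code coherent with the real processor's caches when
 * running under KVM.  As an illustrative sketch (not a prescription), a
 * caller that has just copied a code blob into guest memory might do:
 *
 *     kvmppc_dcbst_range(cpu, addr, len);   // push data cache to memory
 *     kvmppc_eieio();                       // order the stores
 *     kvmppc_icbi_range(cpu, addr, len);    // drop stale icache blocks
 *
 * Under TCG none of this is necessary, so the helpers compile to no-ops.
 */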

#ifndef CONFIG_KVM

#define kvmppc_eieio() do { } while (0)

static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len)
{
}

static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len)
{
}

#else /* CONFIG_KVM */

#define kvmppc_eieio()                            \
    do {                                          \
        if (kvm_enabled()) {                      \
            asm volatile("eieio" : : : "memory"); \
        }                                         \
    } while (0)

/* Store data cache blocks back to memory */
static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len)
{
    uint8_t *p;

    for (p = addr; p < addr + len; p += cpu->env.dcache_line_size) {
        asm volatile("dcbst 0,%0" : : "r"(p) : "memory");
    }
}

/* Invalidate instruction cache blocks */
static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len)
{
    uint8_t *p;

    for (p = addr; p < addr + len; p += cpu->env.icache_line_size) {
        asm volatile("icbi 0,%0" : : "r"(p));
    }
}

#endif /* CONFIG_KVM */

#endif /* KVM_PPC_H */