/*
 * Copyright 2008 IBM Corporation.
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *
 * This work is licensed under the GNU GPL license version 2 or later.
 *
 */

#ifndef KVM_PPC_H
#define KVM_PPC_H

#define TYPE_HOST_POWERPC_CPU POWERPC_CPU_TYPE_NAME("host")

#ifdef CONFIG_KVM

uint32_t kvmppc_get_tbfreq(void);
uint64_t kvmppc_get_clockfreq(void);
bool kvmppc_get_host_model(char **buf);
bool kvmppc_get_host_serial(char **buf);
int kvmppc_get_hasidle(CPUPPCState *env);
int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len);
int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level);
void kvmppc_enable_logical_ci_hcalls(void);
void kvmppc_enable_set_mode_hcall(void);
void kvmppc_enable_clear_ref_mod_hcalls(void);
void kvmppc_enable_h_page_init(void);
void kvmppc_set_papr(PowerPCCPU *cpu);
int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr);
void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy);
int kvmppc_set_fwnmi(void);
int kvmppc_smt_threads(void);
void kvmppc_error_append_smt_possible_hint(Error *const *errp);
int kvmppc_set_smt_threads(int smt);
int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits);
int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits);
int kvmppc_set_tcr(PowerPCCPU *cpu);
int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu);
target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
                                     bool radix, bool gtse,
                                     uint64_t proc_tbl);
void kvmppc_svm_off(Error **errp);
#ifndef CONFIG_USER_ONLY
bool kvmppc_spapr_use_multitce(void);
int kvmppc_spapr_enable_inkernel_multitce(void);
void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
                              uint64_t bus_offset, uint32_t nb_table,
                              int *pfd, bool need_vfio);
int kvmppc_remove_spapr_tce(void *table, int pfd, uint32_t window_size);
int kvmppc_reset_htab(int shift_hint);
uint64_t kvmppc_vrma_limit(unsigned int hash_shift);
bool kvmppc_has_cap_spapr_vfio(void);
#endif /* !CONFIG_USER_ONLY */
bool kvmppc_has_cap_epr(void);
int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function);
int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp);
int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns);
int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
                           uint16_t n_valid, uint16_t n_invalid);
void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n);
void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1);
bool kvmppc_has_cap_fixup_hcalls(void);
bool kvmppc_has_cap_htm(void);
bool kvmppc_has_cap_mmu_radix(void);
bool kvmppc_has_cap_mmu_hash_v3(void);
bool kvmppc_has_cap_xive(void);
int kvmppc_get_cap_safe_cache(void);
int kvmppc_get_cap_safe_bounds_check(void);
int kvmppc_get_cap_safe_indirect_branch(void);
int kvmppc_get_cap_count_cache_flush_assist(void);
bool kvmppc_has_cap_nested_kvm_hv(void);
int kvmppc_set_cap_nested_kvm_hv(int enable);
int kvmppc_get_cap_large_decr(void);
int kvmppc_enable_cap_large_decr(PowerPCCPU *cpu, int enable);
int kvmppc_enable_hwrng(void);
int kvmppc_put_books_sregs(PowerPCCPU *cpu);
PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void);
void kvmppc_check_papr_resize_hpt(Error **errp);
int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu, target_ulong flags, int shift);
int kvmppc_resize_hpt_commit(PowerPCCPU *cpu, target_ulong flags, int shift);
bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu);

bool kvmppc_hpt_needs_host_contiguous_pages(void);
void kvm_check_mmu(PowerPCCPU *cpu, Error **errp);
void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu, unsigned int online);
void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset);

int kvm_handle_nmi(PowerPCCPU *cpu, struct kvm_run *run);

#else

static inline uint32_t kvmppc_get_tbfreq(void)
{
    return 0;
}

static inline bool kvmppc_get_host_model(char **buf)
{
    return false;
}

static inline bool kvmppc_get_host_serial(char **buf)
{
    return false;
}

static inline uint64_t kvmppc_get_clockfreq(void)
{
    return 0;
}

static inline uint32_t kvmppc_get_vmx(void)
{
    return 0;
}

static inline uint32_t kvmppc_get_dfp(void)
{
    return 0;
}

static inline int kvmppc_get_hasidle(CPUPPCState *env)
{
    return 0;
}

static inline int kvmppc_get_hypercall(CPUPPCState *env,
                                       uint8_t *buf, int buf_len)
{
    return -1;
}

static inline int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
{
    return -1;
}

static inline void kvmppc_enable_logical_ci_hcalls(void)
{
}

static inline void kvmppc_enable_set_mode_hcall(void)
{
}

static inline void kvmppc_enable_clear_ref_mod_hcalls(void)
{
}

static inline void kvmppc_enable_h_page_init(void)
{
}

static inline void kvmppc_set_papr(PowerPCCPU *cpu)
{
}

static inline int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr)
{
    return 0;
}

static inline void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
{
}

static inline int kvmppc_set_fwnmi(void)
{
    return -1;
}

static inline int kvmppc_smt_threads(void)
{
    return 1;
}

static inline void kvmppc_error_append_smt_possible_hint(Error *const *errp)
{
    return;
}

static inline int kvmppc_set_smt_threads(int smt)
{
    return 0;
}

static inline int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
{
    return 0;
}

static inline int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
{
    return 0;
}

static inline int kvmppc_set_tcr(PowerPCCPU *cpu)
{
    return 0;
}

static inline int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
{
    return -1;
}

static inline target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
                                                   bool radix, bool gtse,
                                                   uint64_t proc_tbl)
{
    return 0;
}

static inline void kvmppc_svm_off(Error **errp)
{
    return;
}

static inline void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu,
                                             unsigned int online)
{
    return;
}

static inline void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset)
{
}

#ifndef CONFIG_USER_ONLY
static inline bool kvmppc_spapr_use_multitce(void)
{
    return false;
}

static inline int kvmppc_spapr_enable_inkernel_multitce(void)
{
    return -1;
}

static inline void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
                                            uint64_t bus_offset,
                                            uint32_t nb_table,
                                            int *pfd, bool need_vfio)
{
    return NULL;
}

static inline int kvmppc_remove_spapr_tce(void *table, int pfd,
                                          uint32_t nb_table)
{
    return -1;
}

static inline int kvmppc_reset_htab(int shift_hint)
{
    return 0;
}

static inline uint64_t kvmppc_vrma_limit(unsigned int hash_shift)
{
    g_assert_not_reached();
}

static inline bool kvmppc_hpt_needs_host_contiguous_pages(void)
{
    return false;
}

static inline void kvm_check_mmu(PowerPCCPU *cpu, Error **errp)
{
}

static inline bool kvmppc_has_cap_spapr_vfio(void)
{
    return false;
}

#endif /* !CONFIG_USER_ONLY */

static inline bool kvmppc_has_cap_epr(void)
{
    return false;
}

static inline int kvmppc_define_rtas_kernel_token(uint32_t token,
                                                  const char *function)
{
    return -1;
}

static inline int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp)
{
    return -1;
}

static inline int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize,
                                   int64_t max_ns)
{
    abort();
}

static inline int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
                                         uint16_t n_valid, uint16_t n_invalid)
{
    abort();
}

static inline void kvmppc_read_hptes(ppc_hash_pte64_t *hptes,
                                     hwaddr ptex, int n)
{
    abort();
}

static inline void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
{
    abort();
}

static inline bool kvmppc_has_cap_fixup_hcalls(void)
{
    abort();
}

static inline bool kvmppc_has_cap_htm(void)
{
    return false;
}

static inline bool kvmppc_has_cap_mmu_radix(void)
{
    return false;
}

static inline bool kvmppc_has_cap_mmu_hash_v3(void)
{
    return false;
}

static inline bool kvmppc_has_cap_xive(void)
{
    return false;
}

static inline int kvmppc_get_cap_safe_cache(void)
{
    return 0;
}

static inline int kvmppc_get_cap_safe_bounds_check(void)
{
    return 0;
}

static inline int kvmppc_get_cap_safe_indirect_branch(void)
{
    return 0;
}

static inline int kvmppc_get_cap_count_cache_flush_assist(void)
{
    return 0;
}

static inline bool kvmppc_has_cap_nested_kvm_hv(void)
{
    return false;
}

static inline int kvmppc_set_cap_nested_kvm_hv(int enable)
{
    return -1;
}

static inline int kvmppc_get_cap_large_decr(void)
{
    return 0;
}

static inline int kvmppc_enable_cap_large_decr(PowerPCCPU *cpu, int enable)
{
    return -1;
}

static inline int kvmppc_enable_hwrng(void)
{
    return -1;
}

static inline int kvmppc_put_books_sregs(PowerPCCPU *cpu)
{
    abort();
}

static inline PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
{
    return NULL;
}

static inline void kvmppc_check_papr_resize_hpt(Error **errp)
{
    return;
}

static inline int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu,
                                            target_ulong flags, int shift)
{
    return -ENOSYS;
}

static inline int kvmppc_resize_hpt_commit(PowerPCCPU *cpu,
                                           target_ulong flags, int shift)
{
    return -ENOSYS;
}

static inline bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu)
{
    return false;
}

#endif

#ifndef CONFIG_KVM

#define kvmppc_eieio() do { } while (0)

static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len)
{
}

static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len)
{
}

#else /* CONFIG_KVM */

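/*
 * Illustrative usage sketch for the helpers below; cpu, buf and len are
 * placeholders for the caller's own CPU pointer and buffer, not names
 * defined in this header.  After storing instructions into guest memory,
 * a caller would typically push the dirty data cache blocks out, order
 * the stores, then invalidate the stale instruction cache blocks:
 *
 *     kvmppc_dcbst_range(cpu, buf, len);
 *     kvmppc_eieio();
 *     kvmppc_icbi_range(cpu, buf, len);
 *
 * In non-KVM builds these compile to empty stubs (see above); under
 * CONFIG_KVM, kvmppc_eieio() only issues the barrier when KVM is
 * actually enabled at run time.
 */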
#define kvmppc_eieio()                                \
    do {                                              \
        if (kvm_enabled()) {                          \
            asm volatile("eieio" : : : "memory");     \
        }                                             \
    } while (0)

/* Store data cache blocks back to memory */
static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len)
{
    uint8_t *p;

    for (p = addr; p < addr + len; p += cpu->env.dcache_line_size) {
        asm volatile("dcbst 0,%0" : : "r"(p) : "memory");
    }
}

/* Invalidate instruction cache blocks */
static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len)
{
    uint8_t *p;

    for (p = addr; p < addr + len; p += cpu->env.icache_line_size) {
        asm volatile("icbi 0,%0" : : "r"(p));
    }
}

#endif /* CONFIG_KVM */

#endif /* KVM_PPC_H */